Dec 11 08:15:49 crc systemd[1]: Starting Kubernetes Kubelet...
Dec 11 08:15:49 crc restorecon[4680]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 08:15:49 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 11 08:15:50 crc restorecon[4680]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 08:15:50 crc 
restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 11 08:15:50 crc 
restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 
08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 08:15:50 crc 
restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 
08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:50 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 08:15:51 crc restorecon[4680]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 
08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc 
restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 11 08:15:51 crc restorecon[4680]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 11 08:15:51 crc restorecon[4680]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
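[Note] The long run of "not reset as customized by admin" messages above is restorecon's handling of customizable SELinux types: container_file_t is treated as an admin customization, so a default relabel pass reports each file but leaves its context alone unless the reset is forced. A minimal sketch of how this could be verified on the node, reusing a path from the log; the flags and matchpathcon behavior are standard SELinux tooling, not something this log itself demonstrates:

    ls -Z /var/lib/kubelet/plugins/csi-hostpath/csi.sock         # current context (container_file_t)
    matchpathcon /var/lib/kubelet/plugins/csi-hostpath/csi.sock  # default context from policy
    restorecon -Rv /var/lib/kubelet     # skips customizable types, as logged above
    restorecon -RFv /var/lib/kubelet    # -F would force even customizable types back to the policy default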
Dec 11 08:15:52 crc kubenswrapper[4881]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 11 08:15:52 crc kubenswrapper[4881]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Dec 11 08:15:52 crc kubenswrapper[4881]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 11 08:15:52 crc kubenswrapper[4881]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 11 08:15:52 crc kubenswrapper[4881]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Dec 11 08:15:52 crc kubenswrapper[4881]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.553007 4881 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
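[Note] Each "Flag ... has been deprecated" warning above points at the same migration: the setting belongs in the KubeletConfiguration file that the kubelet already loads via --config. A minimal sketch of the equivalent stanzas; the field names are the real kubelet.config.k8s.io/v1beta1 ones, but the values are illustrative assumptions, since this node's actual settings are not shown in the log:

    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    # replaces --container-runtime-endpoint (CRI-O socket assumed here)
    containerRuntimeEndpoint: unix:///var/run/crio/crio.sock
    # replaces --volume-plugin-dir
    volumePluginDir: /etc/kubernetes/kubelet-plugins/volume/exec
    # replaces --register-with-taints
    registerWithTaints:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
    # replaces --system-reserved
    systemReserved:
      cpu: 500m
      memory: 1Gi
    # replaces --minimum-container-ttl-duration, per the hint to use eviction thresholds
    evictionHard:
      memory.available: 100Mi

--pod-infra-container-image has no config-file equivalent; as the server.go:211 line above notes, the sandbox (pause) image must also be set in the container runtime itself (for CRI-O, pause_image in crio.conf).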
kubenswrapper[4881]: W1211 08:15:52.557000 4881 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557003 4881 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557008 4881 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557013 4881 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557017 4881 feature_gate.go:330] unrecognized feature gate: PinnedImages Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557021 4881 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557026 4881 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557029 4881 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557033 4881 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557037 4881 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557041 4881 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557046 4881 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557049 4881 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557053 4881 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557057 4881 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557061 4881 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557064 4881 feature_gate.go:330] unrecognized feature gate: PlatformOperators Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557068 4881 feature_gate.go:330] unrecognized feature gate: InsightsConfig Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557072 4881 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557077 4881 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557080 4881 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557085 4881 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557088 4881 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557092 4881 feature_gate.go:330] unrecognized feature gate: NewOLM Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557097 4881 feature_gate.go:330] unrecognized feature gate: 
SigstoreImageVerification Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557100 4881 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557104 4881 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557108 4881 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557113 4881 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557116 4881 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557120 4881 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557124 4881 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557127 4881 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557131 4881 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557134 4881 feature_gate.go:330] unrecognized feature gate: Example Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557137 4881 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557141 4881 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557144 4881 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557148 4881 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557151 4881 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557155 4881 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557158 4881 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557163 4881 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557166 4881 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557170 4881 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557173 4881 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557177 4881 feature_gate.go:330] unrecognized feature gate: GatewayAPI Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557180 4881 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557184 4881 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557187 4881 feature_gate.go:330] unrecognized feature gate: SignatureStores Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.557191 4881 feature_gate.go:330] unrecognized feature gate: 
NetworkLiveMigration Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557267 4881 flags.go:64] FLAG: --address="0.0.0.0" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557277 4881 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557284 4881 flags.go:64] FLAG: --anonymous-auth="true" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557291 4881 flags.go:64] FLAG: --application-metrics-count-limit="100" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557298 4881 flags.go:64] FLAG: --authentication-token-webhook="false" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557302 4881 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557307 4881 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557312 4881 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557316 4881 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557321 4881 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557325 4881 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557342 4881 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557347 4881 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557351 4881 flags.go:64] FLAG: --cgroup-root="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557355 4881 flags.go:64] FLAG: --cgroups-per-qos="true" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557359 4881 flags.go:64] FLAG: --client-ca-file="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557364 4881 flags.go:64] FLAG: --cloud-config="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557368 4881 flags.go:64] FLAG: --cloud-provider="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557372 4881 flags.go:64] FLAG: --cluster-dns="[]" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557376 4881 flags.go:64] FLAG: --cluster-domain="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557380 4881 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557385 4881 flags.go:64] FLAG: --config-dir="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557389 4881 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557393 4881 flags.go:64] FLAG: --container-log-max-files="5" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557399 4881 flags.go:64] FLAG: --container-log-max-size="10Mi" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557403 4881 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557407 4881 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557411 4881 flags.go:64] FLAG: --containerd-namespace="k8s.io" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557415 4881 flags.go:64] FLAG: --contention-profiling="false" Dec 11 08:15:52 crc 
kubenswrapper[4881]: I1211 08:15:52.557419 4881 flags.go:64] FLAG: --cpu-cfs-quota="true" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557424 4881 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557428 4881 flags.go:64] FLAG: --cpu-manager-policy="none" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557435 4881 flags.go:64] FLAG: --cpu-manager-policy-options="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557440 4881 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557444 4881 flags.go:64] FLAG: --enable-controller-attach-detach="true" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557448 4881 flags.go:64] FLAG: --enable-debugging-handlers="true" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557452 4881 flags.go:64] FLAG: --enable-load-reader="false" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557456 4881 flags.go:64] FLAG: --enable-server="true" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557460 4881 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557466 4881 flags.go:64] FLAG: --event-burst="100" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557470 4881 flags.go:64] FLAG: --event-qps="50" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557474 4881 flags.go:64] FLAG: --event-storage-age-limit="default=0" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557478 4881 flags.go:64] FLAG: --event-storage-event-limit="default=0" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557482 4881 flags.go:64] FLAG: --eviction-hard="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557487 4881 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557492 4881 flags.go:64] FLAG: --eviction-minimum-reclaim="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557496 4881 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557500 4881 flags.go:64] FLAG: --eviction-soft="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557504 4881 flags.go:64] FLAG: --eviction-soft-grace-period="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557508 4881 flags.go:64] FLAG: --exit-on-lock-contention="false" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557511 4881 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557516 4881 flags.go:64] FLAG: --experimental-mounter-path="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557519 4881 flags.go:64] FLAG: --fail-cgroupv1="false" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557523 4881 flags.go:64] FLAG: --fail-swap-on="true" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557527 4881 flags.go:64] FLAG: --feature-gates="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557532 4881 flags.go:64] FLAG: --file-check-frequency="20s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557536 4881 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557540 4881 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557544 4881 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Dec 11 08:15:52 crc 
kubenswrapper[4881]: I1211 08:15:52.557548 4881 flags.go:64] FLAG: --healthz-port="10248" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557553 4881 flags.go:64] FLAG: --help="false" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557557 4881 flags.go:64] FLAG: --hostname-override="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557561 4881 flags.go:64] FLAG: --housekeeping-interval="10s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557565 4881 flags.go:64] FLAG: --http-check-frequency="20s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557570 4881 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557574 4881 flags.go:64] FLAG: --image-credential-provider-config="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557578 4881 flags.go:64] FLAG: --image-gc-high-threshold="85" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557582 4881 flags.go:64] FLAG: --image-gc-low-threshold="80" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557587 4881 flags.go:64] FLAG: --image-service-endpoint="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557591 4881 flags.go:64] FLAG: --kernel-memcg-notification="false" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557595 4881 flags.go:64] FLAG: --kube-api-burst="100" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557599 4881 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557603 4881 flags.go:64] FLAG: --kube-api-qps="50" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557607 4881 flags.go:64] FLAG: --kube-reserved="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557611 4881 flags.go:64] FLAG: --kube-reserved-cgroup="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557615 4881 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557619 4881 flags.go:64] FLAG: --kubelet-cgroups="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557623 4881 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557627 4881 flags.go:64] FLAG: --lock-file="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557630 4881 flags.go:64] FLAG: --log-cadvisor-usage="false" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557634 4881 flags.go:64] FLAG: --log-flush-frequency="5s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557638 4881 flags.go:64] FLAG: --log-json-info-buffer-size="0" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557645 4881 flags.go:64] FLAG: --log-json-split-stream="false" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557648 4881 flags.go:64] FLAG: --log-text-info-buffer-size="0" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557652 4881 flags.go:64] FLAG: --log-text-split-stream="false" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557656 4881 flags.go:64] FLAG: --logging-format="text" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557661 4881 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557665 4881 flags.go:64] FLAG: --make-iptables-util-chains="true" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557669 4881 flags.go:64] FLAG: --manifest-url="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 
08:15:52.557673 4881 flags.go:64] FLAG: --manifest-url-header="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557678 4881 flags.go:64] FLAG: --max-housekeeping-interval="15s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557707 4881 flags.go:64] FLAG: --max-open-files="1000000" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557712 4881 flags.go:64] FLAG: --max-pods="110" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557716 4881 flags.go:64] FLAG: --maximum-dead-containers="-1" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557721 4881 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557725 4881 flags.go:64] FLAG: --memory-manager-policy="None" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557730 4881 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557734 4881 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557738 4881 flags.go:64] FLAG: --node-ip="192.168.126.11" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557742 4881 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557752 4881 flags.go:64] FLAG: --node-status-max-images="50" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557756 4881 flags.go:64] FLAG: --node-status-update-frequency="10s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557760 4881 flags.go:64] FLAG: --oom-score-adj="-999" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557764 4881 flags.go:64] FLAG: --pod-cidr="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557769 4881 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557775 4881 flags.go:64] FLAG: --pod-manifest-path="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557779 4881 flags.go:64] FLAG: --pod-max-pids="-1" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557783 4881 flags.go:64] FLAG: --pods-per-core="0" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557790 4881 flags.go:64] FLAG: --port="10250" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557795 4881 flags.go:64] FLAG: --protect-kernel-defaults="false" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557799 4881 flags.go:64] FLAG: --provider-id="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557802 4881 flags.go:64] FLAG: --qos-reserved="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557806 4881 flags.go:64] FLAG: --read-only-port="10255" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557811 4881 flags.go:64] FLAG: --register-node="true" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557815 4881 flags.go:64] FLAG: --register-schedulable="true" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557818 4881 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557826 4881 flags.go:64] FLAG: --registry-burst="10" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557830 4881 flags.go:64] FLAG: --registry-qps="5" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557834 
4881 flags.go:64] FLAG: --reserved-cpus="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557837 4881 flags.go:64] FLAG: --reserved-memory="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557843 4881 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557847 4881 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557851 4881 flags.go:64] FLAG: --rotate-certificates="false" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557855 4881 flags.go:64] FLAG: --rotate-server-certificates="false" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557859 4881 flags.go:64] FLAG: --runonce="false" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557863 4881 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557867 4881 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557872 4881 flags.go:64] FLAG: --seccomp-default="false" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557876 4881 flags.go:64] FLAG: --serialize-image-pulls="true" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557880 4881 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557884 4881 flags.go:64] FLAG: --storage-driver-db="cadvisor" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557888 4881 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557893 4881 flags.go:64] FLAG: --storage-driver-password="root" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557896 4881 flags.go:64] FLAG: --storage-driver-secure="false" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557901 4881 flags.go:64] FLAG: --storage-driver-table="stats" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557905 4881 flags.go:64] FLAG: --storage-driver-user="root" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557909 4881 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557913 4881 flags.go:64] FLAG: --sync-frequency="1m0s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557917 4881 flags.go:64] FLAG: --system-cgroups="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557921 4881 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557927 4881 flags.go:64] FLAG: --system-reserved-cgroup="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557931 4881 flags.go:64] FLAG: --tls-cert-file="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557935 4881 flags.go:64] FLAG: --tls-cipher-suites="[]" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557940 4881 flags.go:64] FLAG: --tls-min-version="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557944 4881 flags.go:64] FLAG: --tls-private-key-file="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557948 4881 flags.go:64] FLAG: --topology-manager-policy="none" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557952 4881 flags.go:64] FLAG: --topology-manager-policy-options="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557956 4881 flags.go:64] FLAG: --topology-manager-scope="container" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557960 
4881 flags.go:64] FLAG: --v="2" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557965 4881 flags.go:64] FLAG: --version="false" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557971 4881 flags.go:64] FLAG: --vmodule="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557975 4881 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.557980 4881 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558079 4881 feature_gate.go:330] unrecognized feature gate: PinnedImages Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558083 4881 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558087 4881 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558092 4881 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558096 4881 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558100 4881 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558105 4881 feature_gate.go:330] unrecognized feature gate: SignatureStores Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558109 4881 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558113 4881 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558116 4881 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558120 4881 feature_gate.go:330] unrecognized feature gate: InsightsConfig Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558123 4881 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558127 4881 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558131 4881 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558134 4881 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558137 4881 feature_gate.go:330] unrecognized feature gate: OVNObservability Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558141 4881 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558145 4881 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558149 4881 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558153 4881 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
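The flags.go:64 dump above is uniform enough to scrape mechanically. A minimal sketch, assuming only the record shape shown in this log ('FLAG: --name="value"'): a stdlib Python parser that collects the logged flag values into a dict, e.g. to diff effective flags between two kubelet starts. Reading from stdin is an arbitrary choice for the sketch.

    import re
    import sys

    # Each record in the dump looks like:
    #   ... flags.go:64] FLAG: --cgroup-driver="cgroupfs"
    FLAG_RE = re.compile(r'flags\.go:\d+\] FLAG: --([A-Za-z0-9-]+)="(.*?)"')

    def parse_flags(log_text: str) -> dict:
        """Collect the kubelet's logged flag values into {name: value}."""
        return {m.group(1): m.group(2) for m in FLAG_RE.finditer(log_text)}

    if __name__ == "__main__":
        flags = parse_flags(sys.stdin.read())
        for name in ("cgroup-driver", "container-runtime-endpoint", "node-ip"):
            print(f"--{name} = {flags.get(name)!r}")

Note that the dump reflects command-line values only; the cgroup driver logged here as "cgroupfs" is later overridden to "systemd" by the CRI runtime, as the server.go:1437 line further down shows.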
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558157    4881 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558161    4881 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558164    4881 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558168    4881 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558172    4881 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558176    4881 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558179    4881 feature_gate.go:330] unrecognized feature gate: NewOLM
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558183    4881 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558188    4881 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558192    4881 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558196    4881 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558200    4881 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558204    4881 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558208    4881 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558212    4881 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558216    4881 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558220    4881 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558224    4881 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558228    4881 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558233    4881 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558236    4881 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558240    4881 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558244    4881 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558247    4881 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558251    4881 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558256    4881 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558260    4881 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558264    4881 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558268    4881 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558272    4881 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558275    4881 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558279    4881 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558282    4881 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558286    4881 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558290    4881 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558298    4881 feature_gate.go:330] unrecognized feature gate: Example
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558301    4881 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558305    4881 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558308    4881 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558315    4881 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558319    4881 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558322    4881 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558326    4881 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558346    4881 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558350    4881 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558354    4881 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558357    4881 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558361    4881 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558364    4881 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558368    4881 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.558371    4881 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.558377    4881 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.567535    4881 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.567570    4881 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567644    4881 feature_gate.go:330] unrecognized feature gate: NewOLM
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567652    4881 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567659    4881 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567668    4881 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567677    4881 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567683    4881 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567689    4881 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567695    4881 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567700    4881 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567705    4881 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567710    4881 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567715    4881 feature_gate.go:330] unrecognized feature gate: Example
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567720    4881 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567726    4881 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567731    4881 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567736    4881 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567742    4881 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567747    4881 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567754    4881 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567761    4881 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567767    4881 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567773    4881 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567778    4881 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567784    4881 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567789    4881 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567795    4881 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567801    4881 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567806    4881 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567812    4881 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567817    4881 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567822    4881 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567827    4881 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567834    4881 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567840    4881 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567846    4881 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567851    4881 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567857    4881 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567862    4881 feature_gate.go:330] unrecognized feature gate: OVNObservability
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567867    4881 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567872    4881 feature_gate.go:330] unrecognized feature gate: PinnedImages
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567877    4881 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567882    4881 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567887    4881 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567892    4881 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567898    4881 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567903    4881 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567908    4881 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567914    4881 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567919    4881 feature_gate.go:330] unrecognized feature gate: SignatureStores
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567927    4881 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567932    4881 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567937    4881 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567942    4881 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567948    4881 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567953    4881 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567958    4881 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567963    4881 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567968    4881 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567973    4881 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567978    4881 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567984    4881 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567989    4881 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567994    4881 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.567999    4881 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568004    4881 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568010    4881 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568015    4881 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568020    4881 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568025    4881 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568030    4881 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568036    4881 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.568054    4881 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568199    4881 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568209    4881 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568217    4881 feature_gate.go:330] unrecognized feature gate: NewOLM
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568224    4881 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568230    4881 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568236    4881 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568241    4881 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568246    4881 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568252    4881 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568257    4881 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568262    4881 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568267    4881 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568272    4881 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568277    4881 feature_gate.go:330] unrecognized feature gate: Example
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568283    4881 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568288    4881 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568293    4881 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568298    4881 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568303    4881 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568309    4881 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568314    4881 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568319    4881 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568324    4881 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568348    4881 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568354    4881 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568360    4881 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568365    4881 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568370    4881 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568376    4881 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568383    4881 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568388    4881 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568393    4881 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568398    4881 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568403    4881 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568409    4881 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568415    4881 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568420    4881 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568426    4881 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568431    4881 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568436    4881 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568441    4881 feature_gate.go:330] unrecognized feature gate: OVNObservability
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568447    4881 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568452    4881 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568457    4881 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568462    4881 feature_gate.go:330] unrecognized feature gate: PinnedImages
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568467    4881 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568472    4881 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568477    4881 feature_gate.go:330] unrecognized feature gate: SignatureStores
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568482    4881 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568487    4881 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568493    4881 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568498    4881 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568503    4881 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568509    4881 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568516    4881 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568523    4881 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568528    4881 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568535    4881 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568540    4881 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568545    4881 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568550    4881 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568556    4881 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568561    4881 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568566    4881 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568573    4881 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568579    4881 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568585    4881 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568592    4881 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568598 4881 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568604 4881 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.568610 4881 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.568619 4881 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.568764 4881 server.go:940] "Client rotation is on, will bootstrap in background"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.573249 4881 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.573382 4881 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.574070 4881 server.go:997] "Starting client certificate rotation"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.574092 4881 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.574250 4881 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-14 11:25:48.679661955 +0000 UTC
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.574326 4881 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 75h9m56.105339572s for next certificate rotation
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.686421 4881 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.688276 4881 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.706579 4881 log.go:25] "Validated CRI v1 runtime API"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.763661 4881 log.go:25] "Validated CRI v1 image API"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.765164 4881 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.769611 4881 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-12-11-08-10-49-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.769662 4881 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:41 fsType:tmpfs blockSize:0}]
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.814405 4881 manager.go:217] Machine: {Timestamp:2025-12-11 08:15:52.794919626 +0000 UTC m=+1.172288403 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:fece3d29-5045-4c4f-98be-52739a921bd2 BootID:46a6300e-52c1-447e-8230-6662e62288c7 Filesystems:[{Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:41 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:7a:83:9e Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:7a:83:9e Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:cd:cf:82 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:14:51:b3 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:f0:4b:ca Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:f6:25:50 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:96:ca:4f:f2:62:8b Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:02:cc:09:7d:ba:54 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.814834 4881 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.815187 4881 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.815829 4881 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.816122 4881 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.816175 4881 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.816511 4881 topology_manager.go:138] "Creating topology manager with none policy"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.816530 4881 container_manager_linux.go:303] "Creating device plugin manager"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.816893 4881 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.816953 4881 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.817383 4881 state_mem.go:36] "Initialized new in-memory state store"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.818105 4881 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.868214 4881 kubelet.go:418] "Attempting to sync node with API server"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.868282 4881 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.868384 4881 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.868425 4881 kubelet.go:324] "Adding apiserver pod source"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.868507 4881 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.874495 4881 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.874725 4881 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.874737 4881 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.874911 4881 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Dec 11 08:15:52 crc kubenswrapper[4881]: E1211 08:15:52.874930 4881 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError"
Dec 11 08:15:52 crc kubenswrapper[4881]: E1211 08:15:52.874931 4881 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.875473 4881 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.876067 4881 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.876093 4881 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.876100 4881 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.876107 4881 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.876119 4881 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.876130 4881 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.876140 4881 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.876152 4881 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.876163 4881 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.876172 4881 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.876184 4881 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.876192 4881 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.876599 4881 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.877058 4881 server.go:1280] "Started kubelet"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.878054 4881 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.878052 4881 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Dec 11 08:15:52 crc systemd[1]: Started Kubernetes Kubelet.
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.879075 4881 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.878846 4881 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.880841 4881 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.880901 4881 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.881130 4881 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 05:44:47.902808063 +0000 UTC
Dec 11 08:15:52 crc kubenswrapper[4881]: E1211 08:15:52.881390 4881 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.881402 4881 server.go:460] "Adding debug handlers to kubelet server"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.881569 4881 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.881404 4881 volume_manager.go:287] "The desired_state_of_world populator starts"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.882053 4881 volume_manager.go:289] "Starting Kubelet Volume Manager"
Dec 11 08:15:52 crc kubenswrapper[4881]: W1211 08:15:52.882071 4881 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused
Dec 11 08:15:52 crc kubenswrapper[4881]: E1211 08:15:52.882193 4881 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="200ms"
Dec 11 08:15:52 crc kubenswrapper[4881]: E1211 08:15:52.882159 4881 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError"
Dec 11 08:15:52 crc kubenswrapper[4881]: E1211 08:15:52.884295 4881 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.20:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.18801b3164296bf9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 08:15:52.877018105 +0000 UTC m=+1.254386802,LastTimestamp:2025-12-11 08:15:52.877018105 +0000 UTC m=+1.254386802,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.885508 4881 factory.go:55] Registering systemd factory
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.885536 4881 factory.go:221] Registration of the systemd container factory successfully
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.885881 4881 factory.go:153] Registering CRI-O factory
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.885916 4881 factory.go:221] Registration of the crio container factory successfully
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.886033 4881 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.886084 4881 factory.go:103] Registering Raw factory
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.886109 4881 manager.go:1196] Started watching for new ooms in manager
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.887131 4881 manager.go:319] Starting recovery of all containers
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.896684 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.896781 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.897164 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.897192 4881 reconstruct.go:130] "Volume is marked as
uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.897219 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.897255 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.897280 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.897543 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.897630 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.899669 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.899740 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.899830 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.899893 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.899958 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.900038 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.900108 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.900171 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.900230 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.900291 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.900365 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.900433 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.900493 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.900559 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.900632 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.900691 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.900754 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.900816 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.900883 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.900944 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.901006 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.901063 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.901127 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.901191 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.901250 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.901307 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.901380 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.901443 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.903229 4881 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.903381 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.903530 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.903599 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.903659 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.903717 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.903781 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.903837 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.903927 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.903987 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.904045 4881 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.904108 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.904164 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.904219 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911426 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911446 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911469 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911484 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911501 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911515 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911530 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911545 4881 reconstruct.go:130] "Volume is 
marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911558 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911570 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911583 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911597 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911610 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911624 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911693 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911713 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911733 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911754 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911773 4881 reconstruct.go:130] "Volume is marked as uncertain and added into 
the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911794 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911813 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911830 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911844 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911857 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911870 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911883 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911897 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911911 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911925 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911939 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911951 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.911967 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912007 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912023 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912038 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912075 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912091 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912104 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912119 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912133 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912148 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912169 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912182 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912196 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912211 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912226 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912239 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912253 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912266 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912282 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912295 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912309 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" 
volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912324 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912368 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912395 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912411 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912426 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912442 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912474 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912489 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912504 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912519 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912536 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912551 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912567 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912583 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912597 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912612 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912626 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912640 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912654 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912669 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912684 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912700 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912714 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912728 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912746 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912764 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912783 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912801 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912815 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912854 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912869 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912884 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912899 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912913 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912926 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912942 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912957 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912975 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.912989 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913003 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913018 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913032 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913054 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913069 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913085 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913098 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913113 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913128 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913141 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913156 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913171 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913185 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913199 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913215 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913230 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913243 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913257 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913273 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913289 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913306 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913321 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913353 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913368 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913382 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913397 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913410 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" 
volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913424 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913483 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913506 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913532 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913550 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913564 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913580 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913594 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913610 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913626 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913639 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" 
volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913651 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913665 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913680 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913696 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913710 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913724 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913737 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913753 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913776 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913793 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913808 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" 
volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913821 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913836 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913849 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913873 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913887 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913900 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913918 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913932 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913946 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913959 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913973 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.913987 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.914002 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.914018 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.914031 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.914046 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.914060 4881 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.914074 4881 reconstruct.go:97] "Volume reconstruction finished" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.914084 4881 reconciler.go:26] "Reconciler: start to sync state" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.918744 4881 manager.go:324] Recovery completed Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.926569 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.928127 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.928162 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.928171 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.929028 4881 cpu_manager.go:225] "Starting CPU manager" policy="none" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.929052 4881 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.929071 4881 state_mem.go:36] "Initialized new in-memory state store" Dec 11 08:15:52 crc kubenswrapper[4881]: E1211 08:15:52.982250 4881 kubelet_node_status.go:503] "Error getting the 
current node from lister" err="node \"crc\" not found" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.986062 4881 policy_none.go:49] "None policy: Start" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.987860 4881 memory_manager.go:170] "Starting memorymanager" policy="None" Dec 11 08:15:52 crc kubenswrapper[4881]: I1211 08:15:52.987885 4881 state_mem.go:35] "Initializing new in-memory state store" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.002331 4881 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.003824 4881 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.003951 4881 status_manager.go:217] "Starting to sync pod status with apiserver" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.004043 4881 kubelet.go:2335] "Starting kubelet main sync loop" Dec 11 08:15:53 crc kubenswrapper[4881]: E1211 08:15:53.004173 4881 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Dec 11 08:15:53 crc kubenswrapper[4881]: W1211 08:15:53.004904 4881 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:15:53 crc kubenswrapper[4881]: E1211 08:15:53.004981 4881 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.081823 4881 manager.go:334] "Starting Device Plugin manager" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.081898 4881 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.081913 4881 server.go:79] "Starting device plugin registration server" Dec 11 08:15:53 crc kubenswrapper[4881]: E1211 08:15:53.082350 4881 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.082525 4881 eviction_manager.go:189] "Eviction manager: starting control loop" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.082546 4881 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.082807 4881 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Dec 11 08:15:53 crc kubenswrapper[4881]: E1211 08:15:53.082891 4881 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="400ms" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.082946 4881 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.082961 4881 
plugin_manager.go:118] "Starting Kubelet Plugin Manager" Dec 11 08:15:53 crc kubenswrapper[4881]: E1211 08:15:53.091178 4881 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.104454 4881 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.104632 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.106219 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.106266 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.106285 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.106502 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.106685 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.106735 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.107528 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.107565 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.107572 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.107617 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.107578 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.107639 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.107835 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.107792 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.108244 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.108453 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.108477 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.108488 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.108570 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.108831 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.108896 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.109239 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.109260 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.109269 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.109400 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.111318 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.111385 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.111388 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.111427 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.111448 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.114318 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.114378 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.114393 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.114319 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.114455 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.114467 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.114451 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.114515 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.114530 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.114604 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.114636 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.116072 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.116090 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.116099 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.182876 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.184299 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.184394 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.184415 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.184477 4881 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 11 08:15:53 crc kubenswrapper[4881]: E1211 08:15:53.185264 4881 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.20:6443: connect: connection refused" node="crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.254893 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.254930 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.254946 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.254962 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.254979 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.254993 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.255006 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.255024 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.255040 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.255054 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.255072 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.255088 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.255103 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.255117 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod 
\"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.255133 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.356607 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.356693 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.356720 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.356745 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.356780 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.356792 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.356812 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.356831 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.356841 4881 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.356804 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.356938 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.356943 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.356939 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.356956 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.356899 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.356915 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.357070 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.357096 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.357116 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.357139 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.357156 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.357163 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.357172 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.357227 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.357251 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.357255 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.357137 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.357257 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:15:53 crc 
kubenswrapper[4881]: I1211 08:15:53.357372 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.357278 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.386208 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.388404 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.388500 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.388516 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.388568 4881 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 11 08:15:53 crc kubenswrapper[4881]: E1211 08:15:53.389429 4881 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.20:6443: connect: connection refused" node="crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.452687 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.460835 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.483109 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: E1211 08:15:53.483625 4881 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="800ms" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.508232 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: W1211 08:15:53.513433 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-3b49616fadbf69c977496f5fb6fc0d0070f2dd766a874f478c23b7fadfcfa2ed WatchSource:0}: Error finding container 3b49616fadbf69c977496f5fb6fc0d0070f2dd766a874f478c23b7fadfcfa2ed: Status 404 returned error can't find the container with id 3b49616fadbf69c977496f5fb6fc0d0070f2dd766a874f478c23b7fadfcfa2ed Dec 11 08:15:53 crc kubenswrapper[4881]: W1211 08:15:53.513998 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-6ce6ba9f6fad0e97c2b5d0002688cbd9b237e2ddc0066749a4927dffc7b37f07 WatchSource:0}: Error finding container 6ce6ba9f6fad0e97c2b5d0002688cbd9b237e2ddc0066749a4927dffc7b37f07: Status 404 returned error can't find the container with id 6ce6ba9f6fad0e97c2b5d0002688cbd9b237e2ddc0066749a4927dffc7b37f07 Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.515877 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 11 08:15:53 crc kubenswrapper[4881]: W1211 08:15:53.516326 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-8ee54b127185976dfa585a0dc7a960fc5a6c80690046952e9fe99472c6b7747f WatchSource:0}: Error finding container 8ee54b127185976dfa585a0dc7a960fc5a6c80690046952e9fe99472c6b7747f: Status 404 returned error can't find the container with id 8ee54b127185976dfa585a0dc7a960fc5a6c80690046952e9fe99472c6b7747f Dec 11 08:15:53 crc kubenswrapper[4881]: W1211 08:15:53.526054 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-96f6b33d4de9a44f2cae0349aea3a7227964cb5fae7e0688a1f7da435e0d4fc9 WatchSource:0}: Error finding container 96f6b33d4de9a44f2cae0349aea3a7227964cb5fae7e0688a1f7da435e0d4fc9: Status 404 returned error can't find the container with id 96f6b33d4de9a44f2cae0349aea3a7227964cb5fae7e0688a1f7da435e0d4fc9 Dec 11 08:15:53 crc kubenswrapper[4881]: W1211 08:15:53.534772 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-d9c218409ad877966ca24365d98b5c3e75051973bb9226c040795947dba248cd WatchSource:0}: Error finding container d9c218409ad877966ca24365d98b5c3e75051973bb9226c040795947dba248cd: Status 404 returned error can't find the container with id d9c218409ad877966ca24365d98b5c3e75051973bb9226c040795947dba248cd Dec 11 08:15:53 crc kubenswrapper[4881]: E1211 08:15:53.604172 4881 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.20:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.18801b3164296bf9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting 
kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 08:15:52.877018105 +0000 UTC m=+1.254386802,LastTimestamp:2025-12-11 08:15:52.877018105 +0000 UTC m=+1.254386802,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.790355 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.791853 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.792045 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.792055 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.792076 4881 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 11 08:15:53 crc kubenswrapper[4881]: E1211 08:15:53.792512 4881 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.20:6443: connect: connection refused" node="crc" Dec 11 08:15:53 crc kubenswrapper[4881]: W1211 08:15:53.796390 4881 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:15:53 crc kubenswrapper[4881]: E1211 08:15:53.796490 4881 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.881446 4881 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 08:37:19.422136867 +0000 UTC Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.881535 4881 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:15:53 crc kubenswrapper[4881]: I1211 08:15:53.881551 4881 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 240h21m25.540590791s for next certificate rotation Dec 11 08:15:53 crc kubenswrapper[4881]: W1211 08:15:53.901641 4881 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:15:53 crc kubenswrapper[4881]: E1211 08:15:53.901721 4881 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": 
dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:15:54 crc kubenswrapper[4881]: I1211 08:15:54.008892 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8ee54b127185976dfa585a0dc7a960fc5a6c80690046952e9fe99472c6b7747f"} Dec 11 08:15:54 crc kubenswrapper[4881]: I1211 08:15:54.010389 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"3b49616fadbf69c977496f5fb6fc0d0070f2dd766a874f478c23b7fadfcfa2ed"} Dec 11 08:15:54 crc kubenswrapper[4881]: I1211 08:15:54.012048 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6ce6ba9f6fad0e97c2b5d0002688cbd9b237e2ddc0066749a4927dffc7b37f07"} Dec 11 08:15:54 crc kubenswrapper[4881]: I1211 08:15:54.013599 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"d9c218409ad877966ca24365d98b5c3e75051973bb9226c040795947dba248cd"} Dec 11 08:15:54 crc kubenswrapper[4881]: I1211 08:15:54.014918 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"96f6b33d4de9a44f2cae0349aea3a7227964cb5fae7e0688a1f7da435e0d4fc9"} Dec 11 08:15:54 crc kubenswrapper[4881]: W1211 08:15:54.131195 4881 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:15:54 crc kubenswrapper[4881]: E1211 08:15:54.131324 4881 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:15:54 crc kubenswrapper[4881]: W1211 08:15:54.209290 4881 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:15:54 crc kubenswrapper[4881]: E1211 08:15:54.209432 4881 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:15:54 crc kubenswrapper[4881]: E1211 08:15:54.284510 4881 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="1.6s" Dec 11 08:15:54 crc kubenswrapper[4881]: I1211 08:15:54.593493 4881 kubelet_node_status.go:401] 
"Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:54 crc kubenswrapper[4881]: I1211 08:15:54.595739 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:54 crc kubenswrapper[4881]: I1211 08:15:54.595806 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:54 crc kubenswrapper[4881]: I1211 08:15:54.595816 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:54 crc kubenswrapper[4881]: I1211 08:15:54.595845 4881 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 11 08:15:54 crc kubenswrapper[4881]: E1211 08:15:54.596745 4881 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.20:6443: connect: connection refused" node="crc" Dec 11 08:15:54 crc kubenswrapper[4881]: I1211 08:15:54.882266 4881 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.026367 4881 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff" exitCode=0 Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.026430 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff"} Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.026462 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.028194 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.028253 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.028266 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.029687 4881 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d" exitCode=0 Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.029750 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d"} Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.029854 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.031105 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.031148 4881 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.031166 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.032180 4881 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396" exitCode=0 Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.032304 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396"} Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.032422 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.033289 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.033320 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.033397 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.033956 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262"} Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.035288 4881 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="ba57aaaaf5a0258b44859937fd4deececf010c96c7753f4b97e69ae2b11584e1" exitCode=0 Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.035327 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"ba57aaaaf5a0258b44859937fd4deececf010c96c7753f4b97e69ae2b11584e1"} Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.035509 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.036492 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.036520 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.036529 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.037948 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.040176 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.040210 4881 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.040219 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:55 crc kubenswrapper[4881]: W1211 08:15:55.532637 4881 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:15:55 crc kubenswrapper[4881]: E1211 08:15:55.532713 4881 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:15:55 crc kubenswrapper[4881]: I1211 08:15:55.881095 4881 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:15:55 crc kubenswrapper[4881]: E1211 08:15:55.885895 4881 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="3.2s" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.040059 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a"} Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.040107 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910"} Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.040120 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d"} Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.040124 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.041177 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.041208 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.041220 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.041625 4881 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="16c230355458e43c12c58fbd75de648c7f553961e1202314e793170f3fb03fb3" exitCode=0 Dec 11 08:15:56 crc 
kubenswrapper[4881]: I1211 08:15:56.041689 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"16c230355458e43c12c58fbd75de648c7f553961e1202314e793170f3fb03fb3"} Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.041767 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.042826 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.042843 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.042852 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.043791 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"9251be56904cea5a0c24898fa10c30718080f3fe77804166d664b3f40235c80a"} Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.043810 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.044589 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.044630 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.044644 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.046819 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"bdda482f14afc374a3ba38a02b0b93bab4faf300cc47f7ee39ef9183af904f22"} Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.046852 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"ae6cead982609bcea650eccda0c847ae1c568061104bda0584984b9d62481b0e"} Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.048327 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9"} Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.197790 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.198851 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.198898 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.198922 4881 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.198954 4881 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 11 08:15:56 crc kubenswrapper[4881]: E1211 08:15:56.199352 4881 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.20:6443: connect: connection refused" node="crc" Dec 11 08:15:56 crc kubenswrapper[4881]: W1211 08:15:56.615303 4881 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:15:56 crc kubenswrapper[4881]: E1211 08:15:56.615482 4881 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.882484 4881 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:15:56 crc kubenswrapper[4881]: W1211 08:15:56.889161 4881 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:15:56 crc kubenswrapper[4881]: E1211 08:15:56.889276 4881 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:15:56 crc kubenswrapper[4881]: W1211 08:15:56.968644 4881 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:15:56 crc kubenswrapper[4881]: E1211 08:15:56.968753 4881 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.998442 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:15:56 crc kubenswrapper[4881]: I1211 08:15:56.998735 4881 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": dial tcp 192.168.126.11:10357: connect: connection refused" start-of-body= Dec 11 08:15:56 crc kubenswrapper[4881]: 
I1211 08:15:56.998798 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": dial tcp 192.168.126.11:10357: connect: connection refused" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.052426 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.052417 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"a2846dae5b0d424649d68bdbeb34b7791ee2a8e1b05a0309876cd0d79c5fca01"} Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.053288 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.053319 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.053353 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.054223 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3"} Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.054252 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669"} Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.055555 4881 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="734b87f641d1c4082b0611017a6e2d2a1b3ae995238cde641391325f8caaafe9" exitCode=0 Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.055655 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.056097 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.056365 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"734b87f641d1c4082b0611017a6e2d2a1b3ae995238cde641391325f8caaafe9"} Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.056413 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.056916 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.056933 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.056941 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.057449 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.057465 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.057472 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.057781 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.057791 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.057799 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.103410 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:15:57 crc kubenswrapper[4881]: I1211 08:15:57.881873 4881 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:15:58 crc kubenswrapper[4881]: I1211 08:15:58.061140 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6b11c3ebb26dbe8ee0a22dec8ee186c2dd013cc3566a9bd14a49cc7e24c2213d"} Dec 11 08:15:58 crc kubenswrapper[4881]: I1211 08:15:58.066285 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb"} Dec 11 08:15:58 crc kubenswrapper[4881]: I1211 08:15:58.066421 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:58 crc kubenswrapper[4881]: I1211 08:15:58.066497 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:58 crc kubenswrapper[4881]: I1211 08:15:58.066501 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 11 08:15:58 crc kubenswrapper[4881]: I1211 08:15:58.067825 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:58 crc kubenswrapper[4881]: I1211 08:15:58.067889 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:58 crc kubenswrapper[4881]: I1211 08:15:58.067910 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:58 crc kubenswrapper[4881]: I1211 08:15:58.068474 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:58 crc kubenswrapper[4881]: I1211 08:15:58.068528 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:15:58 crc kubenswrapper[4881]: I1211 08:15:58.068545 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:58 crc kubenswrapper[4881]: I1211 08:15:58.222540 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:15:58 crc kubenswrapper[4881]: I1211 08:15:58.222883 4881 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Dec 11 08:15:58 crc kubenswrapper[4881]: I1211 08:15:58.222957 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Dec 11 08:15:58 crc kubenswrapper[4881]: I1211 08:15:58.882412 4881 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:15:59 crc kubenswrapper[4881]: I1211 08:15:59.068744 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:59 crc kubenswrapper[4881]: I1211 08:15:59.068743 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:59 crc kubenswrapper[4881]: I1211 08:15:59.071734 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:59 crc kubenswrapper[4881]: I1211 08:15:59.071779 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:59 crc kubenswrapper[4881]: I1211 08:15:59.071790 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:59 crc kubenswrapper[4881]: I1211 08:15:59.071870 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:59 crc kubenswrapper[4881]: I1211 08:15:59.071918 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:59 crc kubenswrapper[4881]: I1211 08:15:59.071931 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:59 crc kubenswrapper[4881]: E1211 08:15:59.087272 4881 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="6.4s" Dec 11 08:15:59 crc kubenswrapper[4881]: I1211 08:15:59.399944 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:15:59 crc kubenswrapper[4881]: I1211 08:15:59.401411 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:15:59 crc kubenswrapper[4881]: 
I1211 08:15:59.401449 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:15:59 crc kubenswrapper[4881]: I1211 08:15:59.401459 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:15:59 crc kubenswrapper[4881]: I1211 08:15:59.401481 4881 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 11 08:15:59 crc kubenswrapper[4881]: E1211 08:15:59.401945 4881 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.20:6443: connect: connection refused" node="crc" Dec 11 08:15:59 crc kubenswrapper[4881]: I1211 08:15:59.881412 4881 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:16:00 crc kubenswrapper[4881]: I1211 08:16:00.071522 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:16:00 crc kubenswrapper[4881]: I1211 08:16:00.073223 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:00 crc kubenswrapper[4881]: I1211 08:16:00.073271 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:00 crc kubenswrapper[4881]: I1211 08:16:00.073281 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:00 crc kubenswrapper[4881]: W1211 08:16:00.523005 4881 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:16:00 crc kubenswrapper[4881]: E1211 08:16:00.523115 4881 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:16:00 crc kubenswrapper[4881]: W1211 08:16:00.723850 4881 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:16:00 crc kubenswrapper[4881]: E1211 08:16:00.724002 4881 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:16:00 crc kubenswrapper[4881]: I1211 08:16:00.882195 4881 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:16:01 crc kubenswrapper[4881]: W1211 08:16:01.034693 4881 reflector.go:561] 
k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:16:01 crc kubenswrapper[4881]: E1211 08:16:01.035013 4881 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:16:01 crc kubenswrapper[4881]: I1211 08:16:01.080189 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf"} Dec 11 08:16:01 crc kubenswrapper[4881]: I1211 08:16:01.080255 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:16:01 crc kubenswrapper[4881]: I1211 08:16:01.081290 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:01 crc kubenswrapper[4881]: I1211 08:16:01.081328 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:01 crc kubenswrapper[4881]: I1211 08:16:01.081341 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:01 crc kubenswrapper[4881]: I1211 08:16:01.085085 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"75e9bba95e748021cbf04000b3b00e27d8f75cd0d9e2409761d8a045c6eb3e28"} Dec 11 08:16:01 crc kubenswrapper[4881]: I1211 08:16:01.694972 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:16:01 crc kubenswrapper[4881]: I1211 08:16:01.695277 4881 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="Get \"https://192.168.126.11:6443/livez\": dial tcp 192.168.126.11:6443: connect: connection refused" start-of-body= Dec 11 08:16:01 crc kubenswrapper[4881]: I1211 08:16:01.695388 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/livez\": dial tcp 192.168.126.11:6443: connect: connection refused" Dec 11 08:16:01 crc kubenswrapper[4881]: I1211 08:16:01.887213 4881 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:16:02 crc kubenswrapper[4881]: I1211 08:16:02.093898 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"968879a7801be5095d46d91a9fbbf2e5d951fda34f49e034cc84262231cbea44"} Dec 11 08:16:02 crc kubenswrapper[4881]: I1211 08:16:02.093963 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"bbd836df49f273865e522c55b4d5e453be84c1ed5e8276a17f82b03e74521df3"} Dec 11 08:16:02 crc kubenswrapper[4881]: I1211 08:16:02.093978 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6db11aa4e0b3067beae81e57c640307f34554f8a692c588951dab34fc753921d"} Dec 11 08:16:02 crc kubenswrapper[4881]: I1211 08:16:02.094073 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:16:02 crc kubenswrapper[4881]: I1211 08:16:02.094141 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:16:02 crc kubenswrapper[4881]: I1211 08:16:02.095171 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:02 crc kubenswrapper[4881]: I1211 08:16:02.095206 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:02 crc kubenswrapper[4881]: I1211 08:16:02.095219 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:02 crc kubenswrapper[4881]: I1211 08:16:02.881820 4881 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:16:03 crc kubenswrapper[4881]: W1211 08:16:03.057440 4881 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:16:03 crc kubenswrapper[4881]: E1211 08:16:03.057553 4881 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:16:03 crc kubenswrapper[4881]: E1211 08:16:03.091466 4881 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Dec 11 08:16:03 crc kubenswrapper[4881]: I1211 08:16:03.095243 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:16:03 crc kubenswrapper[4881]: I1211 08:16:03.095400 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:16:03 crc kubenswrapper[4881]: I1211 08:16:03.096100 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:03 crc kubenswrapper[4881]: I1211 08:16:03.096148 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:03 crc kubenswrapper[4881]: I1211 08:16:03.096163 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:03 crc kubenswrapper[4881]: I1211 08:16:03.096268 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 11 08:16:03 crc kubenswrapper[4881]: I1211 08:16:03.096313 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:03 crc kubenswrapper[4881]: I1211 08:16:03.096327 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:03 crc kubenswrapper[4881]: E1211 08:16:03.605860 4881 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.20:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.18801b3164296bf9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 08:15:52.877018105 +0000 UTC m=+1.254386802,LastTimestamp:2025-12-11 08:15:52.877018105 +0000 UTC m=+1.254386802,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 11 08:16:03 crc kubenswrapper[4881]: I1211 08:16:03.881241 4881 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:16:04 crc kubenswrapper[4881]: I1211 08:16:04.099901 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 11 08:16:04 crc kubenswrapper[4881]: I1211 08:16:04.101866 4881 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf" exitCode=255 Dec 11 08:16:04 crc kubenswrapper[4881]: I1211 08:16:04.101935 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf"} Dec 11 08:16:04 crc kubenswrapper[4881]: I1211 08:16:04.102120 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:16:04 crc kubenswrapper[4881]: I1211 08:16:04.103165 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:04 crc kubenswrapper[4881]: I1211 08:16:04.103205 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:04 crc kubenswrapper[4881]: I1211 08:16:04.103221 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:04 crc kubenswrapper[4881]: I1211 08:16:04.104058 4881 scope.go:117] "RemoveContainer" containerID="ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf" Dec 11 08:16:04 crc kubenswrapper[4881]: I1211 08:16:04.305930 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:16:04 crc kubenswrapper[4881]: I1211 08:16:04.306320 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume 
controller attach/detach" Dec 11 08:16:04 crc kubenswrapper[4881]: I1211 08:16:04.307858 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:04 crc kubenswrapper[4881]: I1211 08:16:04.307914 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:04 crc kubenswrapper[4881]: I1211 08:16:04.307926 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:05 crc kubenswrapper[4881]: I1211 08:16:05.106809 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 11 08:16:05 crc kubenswrapper[4881]: I1211 08:16:05.108398 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487"} Dec 11 08:16:05 crc kubenswrapper[4881]: I1211 08:16:05.108549 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:16:05 crc kubenswrapper[4881]: I1211 08:16:05.109277 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:05 crc kubenswrapper[4881]: I1211 08:16:05.109322 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:05 crc kubenswrapper[4881]: I1211 08:16:05.109362 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:05 crc kubenswrapper[4881]: I1211 08:16:05.219587 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:16:05 crc kubenswrapper[4881]: I1211 08:16:05.802190 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:16:05 crc kubenswrapper[4881]: I1211 08:16:05.804525 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:05 crc kubenswrapper[4881]: I1211 08:16:05.804579 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:05 crc kubenswrapper[4881]: I1211 08:16:05.804593 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:05 crc kubenswrapper[4881]: I1211 08:16:05.804654 4881 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 11 08:16:06 crc kubenswrapper[4881]: I1211 08:16:06.111857 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:16:06 crc kubenswrapper[4881]: I1211 08:16:06.111873 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:16:06 crc kubenswrapper[4881]: I1211 08:16:06.113210 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:06 crc kubenswrapper[4881]: I1211 08:16:06.113276 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:06 crc kubenswrapper[4881]: I1211 08:16:06.113290 4881 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:06 crc kubenswrapper[4881]: I1211 08:16:06.553577 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Dec 11 08:16:06 crc kubenswrapper[4881]: I1211 08:16:06.553848 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:16:06 crc kubenswrapper[4881]: I1211 08:16:06.555534 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:06 crc kubenswrapper[4881]: I1211 08:16:06.555597 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:06 crc kubenswrapper[4881]: I1211 08:16:06.555609 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:07 crc kubenswrapper[4881]: I1211 08:16:07.114196 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:16:07 crc kubenswrapper[4881]: I1211 08:16:07.115570 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:07 crc kubenswrapper[4881]: I1211 08:16:07.115636 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:07 crc kubenswrapper[4881]: I1211 08:16:07.115704 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:08 crc kubenswrapper[4881]: I1211 08:16:08.231948 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:16:08 crc kubenswrapper[4881]: I1211 08:16:08.232141 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:16:08 crc kubenswrapper[4881]: I1211 08:16:08.233691 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:08 crc kubenswrapper[4881]: I1211 08:16:08.233737 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:08 crc kubenswrapper[4881]: I1211 08:16:08.233750 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:08 crc kubenswrapper[4881]: I1211 08:16:08.244876 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:16:09 crc kubenswrapper[4881]: I1211 08:16:09.120437 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:16:09 crc kubenswrapper[4881]: I1211 08:16:09.121830 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:09 crc kubenswrapper[4881]: I1211 08:16:09.121897 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:09 crc kubenswrapper[4881]: I1211 08:16:09.121923 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:10 crc kubenswrapper[4881]: I1211 08:16:10.000293 4881 patch_prober.go:28] interesting pod/kube-controller-manager-crc 
Dec 11 08:16:10 crc kubenswrapper[4881]: I1211 08:16:10.001028 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Dec 11 08:16:10 crc kubenswrapper[4881]: I1211 08:16:10.741269 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Dec 11 08:16:10 crc kubenswrapper[4881]: I1211 08:16:10.741739 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 08:16:10 crc kubenswrapper[4881]: I1211 08:16:10.742923 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:10 crc kubenswrapper[4881]: I1211 08:16:10.742976 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:10 crc kubenswrapper[4881]: I1211 08:16:10.742990 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:10 crc kubenswrapper[4881]: I1211 08:16:10.767264 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Dec 11 08:16:11 crc kubenswrapper[4881]: I1211 08:16:11.125397 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 08:16:11 crc kubenswrapper[4881]: I1211 08:16:11.126220 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:11 crc kubenswrapper[4881]: I1211 08:16:11.126243 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:11 crc kubenswrapper[4881]: I1211 08:16:11.126252 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:11 crc kubenswrapper[4881]: I1211 08:16:11.142730 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Dec 11 08:16:11 crc kubenswrapper[4881]: I1211 08:16:11.391750 4881 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Dec 11 08:16:11 crc kubenswrapper[4881]: I1211 08:16:11.391823 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Dec 11 08:16:11 crc kubenswrapper[4881]: I1211 08:16:11.707957 4881 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]log ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]etcd ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/openshift.io-api-request-count-filter ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/openshift.io-startkubeinformers ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/start-apiserver-admission-initializer ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/generic-apiserver-start-informers ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/priority-and-fairness-config-consumer ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/priority-and-fairness-filter ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/storage-object-count-tracker-hook ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/start-apiextensions-informers ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/start-apiextensions-controllers ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/crd-informer-synced ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/start-system-namespaces-controller ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/start-cluster-authentication-info-controller ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/start-legacy-token-tracking-controller ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/start-service-ip-repair-controllers ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld
Dec 11 08:16:11 crc kubenswrapper[4881]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/priority-and-fairness-config-producer ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/bootstrap-controller ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/aggregator-reload-proxy-client-cert ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/start-kube-aggregator-informers ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/apiservice-status-local-available-controller ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/apiservice-status-remote-available-controller ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/apiservice-registration-controller ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/apiservice-wait-for-first-sync ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/apiservice-discovery-controller ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/kube-apiserver-autoregistration ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]autoregister-completion ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/apiservice-openapi-controller ok
Dec 11 08:16:11 crc kubenswrapper[4881]: [+]poststarthook/apiservice-openapiv3-controller ok
Dec 11 08:16:11 crc kubenswrapper[4881]: livez check failed
Dec 11 08:16:11 crc kubenswrapper[4881]: I1211 08:16:11.708095 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 08:16:12 crc kubenswrapper[4881]: I1211 08:16:12.128052 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 08:16:12 crc kubenswrapper[4881]: I1211 08:16:12.129010 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:12 crc kubenswrapper[4881]: I1211 08:16:12.129056 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:12 crc kubenswrapper[4881]: I1211 08:16:12.129069 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:13 crc kubenswrapper[4881]: E1211 08:16:13.091617 4881 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Dec 11 08:16:16 crc kubenswrapper[4881]: E1211 08:16:16.403257 4881 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="7s"
Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.408671 4881 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.408704 4881 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.409165 4881 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Dec 11 08:16:16 crc kubenswrapper[4881]: E1211 08:16:16.410667 4881 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.413589 4881 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.699146 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.703323 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.884283 4881 apiserver.go:52] "Watching apiserver"
Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.896991 4881 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.897511 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-apiserver/kube-apiserver-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf"]
pods=["openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-apiserver/kube-apiserver-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf"] Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.897938 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.898429 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.898455 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:16 crc kubenswrapper[4881]: E1211 08:16:16.898710 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.898799 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.898802 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 11 08:16:16 crc kubenswrapper[4881]: E1211 08:16:16.898882 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.899642 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:16 crc kubenswrapper[4881]: E1211 08:16:16.899760 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.902991 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.903030 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.903260 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.904240 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.904368 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.904618 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.904647 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.906422 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.908258 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.950270 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.952279 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.966198 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 08:16:16 crc kubenswrapper[4881]: I1211 08:16:16.983079 4881 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.003957 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.011567 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013054 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013100 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013123 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013145 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013161 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013179 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013194 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013215 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013231 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013244 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013259 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013277 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013290 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013309 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013326 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013362 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013387 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013403 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013419 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013436 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013451 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013466 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013485 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013501 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013518 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013514 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013540 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013604 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013635 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013658 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013683 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013709 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013731 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013755 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013777 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013800 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
\"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013846 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013870 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013895 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013918 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013948 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.013984 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014007 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014017 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014045 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014071 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014097 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014127 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014158 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014182 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014204 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014226 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014248 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod 
\"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014271 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014293 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014315 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014369 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014390 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014425 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014446 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014467 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014488 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014510 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" 
(UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014532 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014555 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014576 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014678 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014691 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014729 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014780 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014802 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014826 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014849 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014874 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014896 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014918 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014938 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014964 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014974 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.014986 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015013 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015036 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015059 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015079 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015108 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015131 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015153 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015181 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015213 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015246 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015271 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015295 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015303 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015318 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015366 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015392 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015415 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015437 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015453 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015459 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015485 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015532 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015557 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015581 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015590 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls".
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015604 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015628 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015651 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015675 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015699 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015720 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015721 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015756 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015777 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015794 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015812 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015842 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015858 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015877 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015895 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015910 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod 
\"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015926 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015942 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015958 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015973 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.015990 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016013 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016040 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016057 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016084 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016107 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016130 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016156 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016173 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016190 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016239 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016258 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016275 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016293 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016312 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016328 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016363 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016379 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016395 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016412 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016439 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016457 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016500 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016516 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016531 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016546 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016563 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016578 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016594 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016592 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016611 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016628 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016662 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016680 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016696 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016712 4881 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016730 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016747 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016763 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016780 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016797 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016812 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016829 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016845 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016861 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 11 
08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016877 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016894 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016910 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016925 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016942 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016958 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016977 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.016993 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017011 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017029 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 11 08:16:17 crc 
kubenswrapper[4881]: I1211 08:16:17.017046 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017061 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017077 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017092 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017107 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017123 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017139 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017156 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017174 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017191 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 11 
08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017207 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017224 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017240 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017258 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017258 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017276 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017294 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017311 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017329 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017911 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017941 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017967 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017995 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018021 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018046 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod 
\"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018071 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018135 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018166 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018196 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018224 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018249 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018288 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018319 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018366 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018392 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018419 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018447 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018480 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018506 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018530 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018608 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018624 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018639 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath 
\"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018655 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018671 4881 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018686 4881 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018700 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018713 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018728 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018741 4881 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018755 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018769 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.024671 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.026517 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.017803 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018414 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018724 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.018975 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.019530 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.019866 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.020071 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.020155 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.020413 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-12-11 08:16:17.520362153 +0000 UTC m=+25.897730910 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.027817 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.027882 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.029093 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.029498 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.029818 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.029855 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.030128 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.030237 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.030416 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.030840 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.032651 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.033031 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.033038 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.033184 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.033605 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.033756 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.033982 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.034108 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.034280 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.020434 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.020529 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.020829 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.034796 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.020819 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.035128 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.020981 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.021120 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.021186 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.021421 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.021455 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.035472 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.021500 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.021505 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.035564 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.021912 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.021947 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). 
InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.022149 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.022247 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.022566 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.023728 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.024290 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.024320 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.024033 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.024806 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.024904 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.025047 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.025086 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.025103 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.025357 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.025411 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.025450 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.025529 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.025935 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.026122 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.026240 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.026492 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.027132 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.027160 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.027451 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.027492 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.027526 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.035787 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.035904 4881 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.035995 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.036075 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:17.536049249 +0000 UTC m=+25.913417966 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.036258 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.036596 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.036613 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.036725 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.036775 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.036796 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.036935 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.037362 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.037624 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.037693 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.037712 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.038060 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.038182 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.038687 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.038819 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.039107 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.039138 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.039194 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.039462 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.039558 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.039714 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.039892 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.040206 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.040645 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.040792 4881 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.040857 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.040887 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:17.540861995 +0000 UTC m=+25.918230892 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.041214 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.041219 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.041265 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.041551 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.041910 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.042116 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.042466 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.042584 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.042670 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.042849 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.042904 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.043046 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.043191 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.043151 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.043784 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.044041 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.044127 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.044856 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.045942 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.046579 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.046712 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.047400 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.047448 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.047619 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.047654 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.048463 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.048962 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.049235 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.049828 4881 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.040137 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.050403 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.050435 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.050546 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.050892 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.051127 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.051547 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.051989 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.052457 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.053059 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.053170 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.053421 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.053715 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.053984 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.054377 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.054725 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.056069 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.058261 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.058407 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.058636 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.058755 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.060299 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.060825 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 
08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\
" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.061197 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.061250 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.061784 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.062181 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.062509 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.062593 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.062680 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.062715 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.062735 4881 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.062848 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:17.562801522 +0000 UTC m=+25.940170309 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.063233 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.063636 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.063982 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.064906 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.064999 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.065181 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.065709 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.066023 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.066099 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.066166 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.066258 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.066704 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.066745 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.067470 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.067774 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.068833 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.069558 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.070428 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.070896 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.076521 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). 
InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.077000 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.077176 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.077042 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.082592 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.083283 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.082125 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.081991 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.083759 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.084565 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.084662 4881 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.084782 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:17.58476124 +0000 UTC m=+25.962129937 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.084974 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.085141 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.085459 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.086535 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.087054 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.087811 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.088226 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.088378 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.099054 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.103106 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.105893 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120273 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120373 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120465 4881 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120483 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120500 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120512 4881 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120525 4881 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120537 4881 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120549 4881 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: 
\"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120561 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120573 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120587 4881 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120600 4881 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120611 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120624 4881 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120637 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120651 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120667 4881 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120682 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120694 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120707 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120746 4881 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120756 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120769 4881 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120778 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120787 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120796 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120805 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120814 4881 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120823 4881 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120832 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120842 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120851 4881 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120861 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120871 4881 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120880 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120890 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120899 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120910 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120919 4881 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120928 4881 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120937 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120948 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120957 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120989 4881 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.120998 4881 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121008 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 
08:16:17.121016 4881 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121026 4881 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121035 4881 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121045 4881 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121054 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121063 4881 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121074 4881 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121083 4881 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121093 4881 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121103 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121111 4881 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121120 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121130 4881 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121139 4881 reconciler_common.go:293] "Volume detached for volume 
\"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121149 4881 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121159 4881 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121169 4881 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121179 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121189 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121200 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121211 4881 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121223 4881 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121233 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121248 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121258 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121268 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121277 4881 reconciler_common.go:293] "Volume detached for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121286 4881 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121297 4881 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121306 4881 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121316 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121327 4881 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121357 4881 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121368 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121378 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121387 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121396 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121407 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121417 4881 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121427 4881 reconciler_common.go:293] "Volume 
detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121437 4881 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121447 4881 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121455 4881 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121464 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121473 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121481 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121490 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121502 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121511 4881 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121521 4881 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121530 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121539 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121548 4881 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121557 4881 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121567 4881 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121577 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121586 4881 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121596 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121605 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121613 4881 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121623 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121633 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121643 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121653 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121663 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121672 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: 
\"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121682 4881 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121691 4881 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121704 4881 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121713 4881 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121724 4881 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121736 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121748 4881 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121760 4881 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121771 4881 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121780 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121788 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121797 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121807 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121818 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121829 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121841 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121852 4881 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121864 4881 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121873 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121881 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121894 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121904 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121914 4881 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121926 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121937 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121949 4881 reconciler_common.go:293] "Volume detached for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121960 4881 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121970 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121977 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121986 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.121994 4881 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122005 4881 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122016 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122027 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122038 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122053 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122064 4881 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122075 4881 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122083 4881 reconciler_common.go:293] "Volume 
detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122093 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122113 4881 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122125 4881 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122136 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122147 4881 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122158 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122167 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122177 4881 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122188 4881 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122199 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122209 4881 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122220 4881 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122231 4881 reconciler_common.go:293] 
"Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122243 4881 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122254 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122265 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122276 4881 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122287 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122298 4881 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122309 4881 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122322 4881 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122359 4881 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122371 4881 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122383 4881 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122394 4881 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122405 4881 
reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122417 4881 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122427 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122438 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122450 4881 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122534 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.122591 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.123034 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.124910 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.132861 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.140713 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.151258 4881 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-apiserver-crc\" already exists" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.152893 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.162797 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.175199 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.187431 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.196558 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.206730 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.216567 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.223298 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.224248 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.224286 4881 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.233293 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.235193 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"reso
urce-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.240587 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 08:16:17 crc kubenswrapper[4881]: W1211 08:16:17.243090 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-996c028f04b51965b27d119a3068afc708f8fb239c25e32c46474253bfcbc16f WatchSource:0}: Error finding container 996c028f04b51965b27d119a3068afc708f8fb239c25e32c46474253bfcbc16f: Status 404 returned error can't find the container with id 996c028f04b51965b27d119a3068afc708f8fb239c25e32c46474253bfcbc16f Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.527718 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.527875 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:16:18.527857991 +0000 UTC m=+26.905226688 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.629306 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.629413 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.629442 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.629472 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.629592 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.629636 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.629651 4881 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.629716 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.629741 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.629597 4881 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.629760 4881 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.629716 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:18.629695167 +0000 UTC m=+27.007063864 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.629834 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:18.629793169 +0000 UTC m=+27.007161876 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.629862 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:18.629849291 +0000 UTC m=+27.007217978 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.629610 4881 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 08:16:17 crc kubenswrapper[4881]: E1211 08:16:17.629950 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:18.629915382 +0000 UTC m=+27.007284079 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 08:16:17 crc kubenswrapper[4881]: I1211 08:16:17.867857 4881 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.004614 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:18 crc kubenswrapper[4881]: E1211 08:16:18.005031 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.147903 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63"} Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.147961 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00"} Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.147973 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"5f54f103a45b6cc16402b26f39c42bbb23207226430c010cd08b56c5e9dafcc8"} Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.149465 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"dc618cb7ef04ea85f97cc83b8059061de6a322eb4b09c78a057b30c67eb5b034"} Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.150840 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f"} Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.150879 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"996c028f04b51965b27d119a3068afc708f8fb239c25e32c46474253bfcbc16f"} Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.167602 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:18Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.187145 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:18Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.200441 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:18Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.213162 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:18Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.223113 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:18Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.235047 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:18Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.248258 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"
running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:18Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.259572 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:18Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.272028 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:18Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.289526 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:18Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.307603 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:18Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.326213 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"
running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:18Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.355718 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:18Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.379363 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:18Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.390896 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:18Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.403871 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:18Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.542719 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:16:18 crc kubenswrapper[4881]: E1211 08:16:18.542953 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:16:20.54291593 +0000 UTC m=+28.920284637 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.644190 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.644250 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.644280 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:18 crc kubenswrapper[4881]: I1211 08:16:18.644302 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:18 crc kubenswrapper[4881]: E1211 08:16:18.644366 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 08:16:18 crc kubenswrapper[4881]: E1211 08:16:18.644392 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 08:16:18 crc kubenswrapper[4881]: E1211 08:16:18.644405 4881 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:18 crc kubenswrapper[4881]: E1211 08:16:18.644451 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 08:16:18 crc kubenswrapper[4881]: E1211 08:16:18.644464 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2025-12-11 08:16:20.644445379 +0000 UTC m=+29.021814076 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:18 crc kubenswrapper[4881]: E1211 08:16:18.644470 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 08:16:18 crc kubenswrapper[4881]: E1211 08:16:18.644470 4881 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 08:16:18 crc kubenswrapper[4881]: E1211 08:16:18.644450 4881 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 08:16:18 crc kubenswrapper[4881]: E1211 08:16:18.644578 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:20.644555292 +0000 UTC m=+29.021923989 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 08:16:18 crc kubenswrapper[4881]: E1211 08:16:18.644486 4881 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:18 crc kubenswrapper[4881]: E1211 08:16:18.644625 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:20.644599603 +0000 UTC m=+29.021968360 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 08:16:18 crc kubenswrapper[4881]: E1211 08:16:18.644644 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:20.644635903 +0000 UTC m=+29.022004690 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.005277 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.005386 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:19 crc kubenswrapper[4881]: E1211 08:16:19.005434 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:16:19 crc kubenswrapper[4881]: E1211 08:16:19.005540 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.011406 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.012174 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.013044 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.013814 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.015412 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.015988 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.016776 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Dec 11 08:16:19 crc 
kubenswrapper[4881]: I1211 08:16:19.017800 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.018410 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.019460 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.020053 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.021549 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.022196 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.023576 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.024965 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.026506 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.027504 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.028005 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.029322 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.030096 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.030862 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.031923 4881 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.032449 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.033773 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.034269 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.035578 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.036560 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.037145 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.038297 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.038999 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.039861 4881 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.039966 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.041715 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.042701 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.043122 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.044909 4881 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.045975 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.046562 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.047194 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.048293 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.048813 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.049911 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.051311 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.051983 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.052857 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.053440 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.054352 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.055127 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.056036 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.056601 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.057127 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.058030 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.058626 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Dec 11 08:16:19 crc kubenswrapper[4881]: I1211 08:16:19.059535 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.005099 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:20 crc kubenswrapper[4881]: E1211 08:16:20.005240 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.157930 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517"} Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.170306 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:20Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.187671 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:20Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.208035 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:20Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.224265 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:20Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.252612 4881 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac
344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:20Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.292030 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/cr
cont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:20Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.336857 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:20Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.357383 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:20Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.559603 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:16:20 crc kubenswrapper[4881]: E1211 08:16:20.559817 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:16:24.55976493 +0000 UTC m=+32.937133627 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.660181 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.660226 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.660252 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 
08:16:20.660272 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:20 crc kubenswrapper[4881]: E1211 08:16:20.660431 4881 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 08:16:20 crc kubenswrapper[4881]: E1211 08:16:20.660481 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 08:16:20 crc kubenswrapper[4881]: E1211 08:16:20.660503 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 08:16:20 crc kubenswrapper[4881]: E1211 08:16:20.660501 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 08:16:20 crc kubenswrapper[4881]: E1211 08:16:20.660532 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:24.660509089 +0000 UTC m=+33.037877786 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 08:16:20 crc kubenswrapper[4881]: E1211 08:16:20.660546 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 08:16:20 crc kubenswrapper[4881]: E1211 08:16:20.660561 4881 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:20 crc kubenswrapper[4881]: E1211 08:16:20.660574 4881 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 08:16:20 crc kubenswrapper[4881]: E1211 08:16:20.660629 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:24.660607592 +0000 UTC m=+33.037976359 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:20 crc kubenswrapper[4881]: E1211 08:16:20.660518 4881 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:20 crc kubenswrapper[4881]: E1211 08:16:20.660683 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:24.660641853 +0000 UTC m=+33.038010600 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 08:16:20 crc kubenswrapper[4881]: E1211 08:16:20.660713 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:24.660700334 +0000 UTC m=+33.038069031 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.735943 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-847k7"] Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.736397 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-847k7" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.738358 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.739062 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.739220 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.756485 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:20Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.771565 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:20Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.783418 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:20Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.796595 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:20Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.807986 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:20Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.819022 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:20Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.862022 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2-hosts-file\") pod \"node-resolver-847k7\" (UID: \"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\") " pod="openshift-dns/node-resolver-847k7" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.862077 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2txkz\" (UniqueName: \"kubernetes.io/projected/1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2-kube-api-access-2txkz\") pod \"node-resolver-847k7\" (UID: \"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\") " pod="openshift-dns/node-resolver-847k7" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.865089 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:20Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.901160 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c
987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:20Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.917753 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:20Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.963083 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2-hosts-file\") pod \"node-resolver-847k7\" (UID: \"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\") " pod="openshift-dns/node-resolver-847k7" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.963375 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2txkz\" (UniqueName: \"kubernetes.io/projected/1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2-kube-api-access-2txkz\") pod \"node-resolver-847k7\" (UID: \"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\") " pod="openshift-dns/node-resolver-847k7" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.963277 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2-hosts-file\") pod \"node-resolver-847k7\" (UID: \"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\") " pod="openshift-dns/node-resolver-847k7" Dec 11 08:16:20 crc kubenswrapper[4881]: I1211 08:16:20.985523 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2txkz\" (UniqueName: \"kubernetes.io/projected/1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2-kube-api-access-2txkz\") pod \"node-resolver-847k7\" (UID: 
\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\") " pod="openshift-dns/node-resolver-847k7" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.005445 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:21 crc kubenswrapper[4881]: E1211 08:16:21.005585 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.005455 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:21 crc kubenswrapper[4881]: E1211 08:16:21.006001 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.048719 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-847k7" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.163369 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-847k7" event={"ID":"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2","Type":"ContainerStarted","Data":"012ec68933b8cf4025caa2655a768f2090fe8bce5db4ea70d42cfddf0ad31b2d"} Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.163738 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-cwhxk"] Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.164192 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-g8jhd"] Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.164352 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-z9nnh"] Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.164407 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.164567 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.164681 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.167240 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.167414 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.167766 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.168151 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.168196 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.168269 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.168670 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.169463 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.169716 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.169904 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.170743 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.172091 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.186633 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.207467 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.221116 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.238174 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.256511 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.267368 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-run-k8s-cni-cncf-io\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.267428 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/afd0cc21-e31c-47c9-a598-cd93dde96121-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.267447 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkbr8\" (UniqueName: \"kubernetes.io/projected/afd0cc21-e31c-47c9-a598-cd93dde96121-kube-api-access-jkbr8\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.267483 4881 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-etc-kubernetes\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.267499 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrsnr\" (UniqueName: \"kubernetes.io/projected/368e635e-0e63-4202-b9e4-4a3a85c6f30c-kube-api-access-zrsnr\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.267515 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/56d69133-af36-4cbd-af7d-3a58cc4dd8ca-mcd-auth-proxy-config\") pod \"machine-config-daemon-z9nnh\" (UID: \"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\") " pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.267530 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-var-lib-cni-bin\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.267544 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/368e635e-0e63-4202-b9e4-4a3a85c6f30c-multus-daemon-config\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.267561 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/56d69133-af36-4cbd-af7d-3a58cc4dd8ca-rootfs\") pod \"machine-config-daemon-z9nnh\" (UID: \"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\") " pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.267576 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-multus-socket-dir-parent\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.267797 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/368e635e-0e63-4202-b9e4-4a3a85c6f30c-cni-binary-copy\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.267897 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-run-netns\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc 
kubenswrapper[4881]: I1211 08:16:21.267944 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-cnibin\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.267995 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/56d69133-af36-4cbd-af7d-3a58cc4dd8ca-proxy-tls\") pod \"machine-config-daemon-z9nnh\" (UID: \"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\") " pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.268026 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/afd0cc21-e31c-47c9-a598-cd93dde96121-tuning-conf-dir\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.268061 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/afd0cc21-e31c-47c9-a598-cd93dde96121-os-release\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.268126 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-system-cni-dir\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.268159 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/afd0cc21-e31c-47c9-a598-cd93dde96121-system-cni-dir\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.268179 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/afd0cc21-e31c-47c9-a598-cd93dde96121-cnibin\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.268248 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-multus-cni-dir\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.268268 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-run-multus-certs\") pod \"multus-g8jhd\" (UID: 
\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.268295 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-var-lib-kubelet\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.268310 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-multus-conf-dir\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.268361 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/afd0cc21-e31c-47c9-a598-cd93dde96121-cni-binary-copy\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.268396 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-hostroot\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.268411 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rwh4\" (UniqueName: \"kubernetes.io/projected/56d69133-af36-4cbd-af7d-3a58cc4dd8ca-kube-api-access-7rwh4\") pod \"machine-config-daemon-z9nnh\" (UID: \"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\") " pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.268425 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-os-release\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.268449 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-var-lib-cni-multus\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.269434 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.298406 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.318413 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.333398 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.357555 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}
]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369545 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-multus-socket-dir-parent\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369595 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/368e635e-0e63-4202-b9e4-4a3a85c6f30c-cni-binary-copy\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369616 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-run-netns\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc 
kubenswrapper[4881]: I1211 08:16:21.369638 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-cnibin\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369662 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/56d69133-af36-4cbd-af7d-3a58cc4dd8ca-proxy-tls\") pod \"machine-config-daemon-z9nnh\" (UID: \"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\") " pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369684 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/afd0cc21-e31c-47c9-a598-cd93dde96121-tuning-conf-dir\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369704 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/afd0cc21-e31c-47c9-a598-cd93dde96121-os-release\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369723 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-system-cni-dir\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369720 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-run-netns\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369743 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/afd0cc21-e31c-47c9-a598-cd93dde96121-system-cni-dir\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369782 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/afd0cc21-e31c-47c9-a598-cd93dde96121-system-cni-dir\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369823 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-multus-socket-dir-parent\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369826 4881 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/afd0cc21-e31c-47c9-a598-cd93dde96121-cnibin\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369850 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/afd0cc21-e31c-47c9-a598-cd93dde96121-cnibin\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369845 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-cnibin\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369893 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-multus-cni-dir\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369911 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-run-multus-certs\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369934 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-var-lib-kubelet\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369953 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-multus-conf-dir\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369964 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-system-cni-dir\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.369969 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/afd0cc21-e31c-47c9-a598-cd93dde96121-cni-binary-copy\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370016 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-hostroot\") pod \"multus-g8jhd\" (UID: 
\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370038 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rwh4\" (UniqueName: \"kubernetes.io/projected/56d69133-af36-4cbd-af7d-3a58cc4dd8ca-kube-api-access-7rwh4\") pod \"machine-config-daemon-z9nnh\" (UID: \"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\") " pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370062 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/afd0cc21-e31c-47c9-a598-cd93dde96121-os-release\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370061 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-os-release\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370099 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-var-lib-cni-multus\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370106 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-os-release\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370119 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-run-k8s-cni-cncf-io\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370132 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-hostroot\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370136 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/afd0cc21-e31c-47c9-a598-cd93dde96121-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370164 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkbr8\" (UniqueName: \"kubernetes.io/projected/afd0cc21-e31c-47c9-a598-cd93dde96121-kube-api-access-jkbr8\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " 
pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370227 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-etc-kubernetes\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370249 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrsnr\" (UniqueName: \"kubernetes.io/projected/368e635e-0e63-4202-b9e4-4a3a85c6f30c-kube-api-access-zrsnr\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370267 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/56d69133-af36-4cbd-af7d-3a58cc4dd8ca-mcd-auth-proxy-config\") pod \"machine-config-daemon-z9nnh\" (UID: \"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\") " pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370287 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-var-lib-cni-bin\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370304 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/368e635e-0e63-4202-b9e4-4a3a85c6f30c-multus-daemon-config\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370322 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/56d69133-af36-4cbd-af7d-3a58cc4dd8ca-rootfs\") pod \"machine-config-daemon-z9nnh\" (UID: \"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\") " pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370321 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-var-lib-cni-multus\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370381 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-run-multus-certs\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370467 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/368e635e-0e63-4202-b9e4-4a3a85c6f30c-cni-binary-copy\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370523 4881 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-var-lib-kubelet\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370552 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-multus-cni-dir\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370622 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/afd0cc21-e31c-47c9-a598-cd93dde96121-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370645 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/afd0cc21-e31c-47c9-a598-cd93dde96121-cni-binary-copy\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370662 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-run-k8s-cni-cncf-io\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370691 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-multus-conf-dir\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370698 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-etc-kubernetes\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370743 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/56d69133-af36-4cbd-af7d-3a58cc4dd8ca-rootfs\") pod \"machine-config-daemon-z9nnh\" (UID: \"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\") " pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.370778 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/368e635e-0e63-4202-b9e4-4a3a85c6f30c-host-var-lib-cni-bin\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.371114 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/56d69133-af36-4cbd-af7d-3a58cc4dd8ca-mcd-auth-proxy-config\") pod \"machine-config-daemon-z9nnh\" (UID: \"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\") " pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.371358 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/368e635e-0e63-4202-b9e4-4a3a85c6f30c-multus-daemon-config\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.372036 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/afd0cc21-e31c-47c9-a598-cd93dde96121-tuning-conf-dir\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.379289 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/56d69133-af36-4cbd-af7d-3a58cc4dd8ca-proxy-tls\") pod \"machine-config-daemon-z9nnh\" (UID: \"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\") " pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.386276 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.400978 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrsnr\" (UniqueName: \"kubernetes.io/projected/368e635e-0e63-4202-b9e4-4a3a85c6f30c-kube-api-access-zrsnr\") pod \"multus-g8jhd\" (UID: \"368e635e-0e63-4202-b9e4-4a3a85c6f30c\") " pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.401111 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rwh4\" (UniqueName: \"kubernetes.io/projected/56d69133-af36-4cbd-af7d-3a58cc4dd8ca-kube-api-access-7rwh4\") pod \"machine-config-daemon-z9nnh\" (UID: \"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\") " pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.404869 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkbr8\" (UniqueName: \"kubernetes.io/projected/afd0cc21-e31c-47c9-a598-cd93dde96121-kube-api-access-jkbr8\") pod \"multus-additional-cni-plugins-cwhxk\" (UID: \"afd0cc21-e31c-47c9-a598-cd93dde96121\") " pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.419974 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.439489 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.455751 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.474445 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.477642 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.486386 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-g8jhd" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.493927 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.494074 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: W1211 08:16:21.499358 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podafd0cc21_e31c_47c9_a598_cd93dde96121.slice/crio-a963cd918efa9c5d38dfe25eeb4bb8c62ec96c9d7de8bb5d1c500526727d0b68 WatchSource:0}: Error finding container a963cd918efa9c5d38dfe25eeb4bb8c62ec96c9d7de8bb5d1c500526727d0b68: Status 404 returned error can't find the container with id a963cd918efa9c5d38dfe25eeb4bb8c62ec96c9d7de8bb5d1c500526727d0b68 Dec 11 08:16:21 crc kubenswrapper[4881]: W1211 08:16:21.510794 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod368e635e_0e63_4202_b9e4_4a3a85c6f30c.slice/crio-99174b50078cd2e17d6bd153f068db25fdfd683389b015d06b32d66db05165b1 
WatchSource:0}: Error finding container 99174b50078cd2e17d6bd153f068db25fdfd683389b015d06b32d66db05165b1: Status 404 returned error can't find the container with id 99174b50078cd2e17d6bd153f068db25fdfd683389b015d06b32d66db05165b1 Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.513969 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: W1211 08:16:21.521268 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56d69133_af36_4cbd_af7d_3a58cc4dd8ca.slice/crio-6cbf9a6538192650367a611f2b982ed213cae6f353bfecc9536bd50779344a7f WatchSource:0}: Error finding container 6cbf9a6538192650367a611f2b982ed213cae6f353bfecc9536bd50779344a7f: Status 404 returned error can't find the container with id 6cbf9a6538192650367a611f2b982ed213cae6f353bfecc9536bd50779344a7f Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.527925 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.542946 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.551742 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-wf8q8"] Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.552632 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.554898 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.555262 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.555363 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.555538 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.555953 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.556143 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.556291 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.560812 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.577508 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.593714 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.608172 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.621557 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.635680 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.647850 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.658553 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.672806 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-run-ovn-kubernetes\") pod 
\"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.672840 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rjwc\" (UniqueName: \"kubernetes.io/projected/f14cc110-e74f-4cb7-a998-041e3f9b537b-kube-api-access-8rjwc\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.672865 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-var-lib-openvswitch\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.672897 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-run-netns\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.672911 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-etc-openvswitch\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.672925 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-systemd\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.672941 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-node-log\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.672964 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovnkube-script-lib\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.672981 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-log-socket\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.673003 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-openvswitch\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.673017 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovnkube-config\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.673032 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.673048 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-kubelet\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.673063 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-systemd-units\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.673078 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-cni-netd\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.673118 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovn-node-metrics-cert\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.673149 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-env-overrides\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.673163 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-slash\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.673185 4881 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-cni-bin\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.673200 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-ovn\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.679780 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.689835 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.703087 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.716020 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.729017 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.752115 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.767822 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.773664 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-systemd-units\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.773714 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.773745 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-kubelet\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.773824 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-cni-netd\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.773824 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.773886 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-kubelet\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.773880 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-systemd-units\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.773942 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovn-node-metrics-cert\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.773956 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-cni-netd\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.773968 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-env-overrides\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774001 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-slash\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774032 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-cni-bin\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774060 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-slash\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774077 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-ovn\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774087 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-cni-bin\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774113 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-ovn\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774099 4881 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-var-lib-openvswitch\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774138 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-var-lib-openvswitch\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774141 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-run-ovn-kubernetes\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774162 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-run-ovn-kubernetes\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774181 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rjwc\" (UniqueName: \"kubernetes.io/projected/f14cc110-e74f-4cb7-a998-041e3f9b537b-kube-api-access-8rjwc\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774219 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-run-netns\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774315 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-etc-openvswitch\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774265 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-run-netns\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774387 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-systemd\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774408 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: 
\"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-node-log\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774522 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovnkube-script-lib\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774547 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-systemd\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774472 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-node-log\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774470 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-etc-openvswitch\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774680 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-env-overrides\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.774762 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-log-socket\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.775225 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovnkube-script-lib\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.775282 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-log-socket\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.775318 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-openvswitch\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.775385 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-openvswitch\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.775445 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovnkube-config\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.775989 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovnkube-config\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.777107 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovn-node-metrics-cert\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.783907 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.789817 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rjwc\" (UniqueName: \"kubernetes.io/projected/f14cc110-e74f-4cb7-a998-041e3f9b537b-kube-api-access-8rjwc\") pod \"ovnkube-node-wf8q8\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.794439 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:21Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:21 crc kubenswrapper[4881]: I1211 08:16:21.886417 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.005057 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:22 crc kubenswrapper[4881]: E1211 08:16:22.005259 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:16:22 crc kubenswrapper[4881]: W1211 08:16:22.044029 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf14cc110_e74f_4cb7_a998_041e3f9b537b.slice/crio-8df2e5f01cc186fb359a8d0d3221245c1f2782daa8511af918f436152f91dc1a WatchSource:0}: Error finding container 8df2e5f01cc186fb359a8d0d3221245c1f2782daa8511af918f436152f91dc1a: Status 404 returned error can't find the container with id 8df2e5f01cc186fb359a8d0d3221245c1f2782daa8511af918f436152f91dc1a Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.169020 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-847k7" event={"ID":"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2","Type":"ContainerStarted","Data":"0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425"} Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.172462 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7"} Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.172527 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707"} Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.172539 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"6cbf9a6538192650367a611f2b982ed213cae6f353bfecc9536bd50779344a7f"} Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.174504 4881 generic.go:334] "Generic (PLEG): container finished" podID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerID="1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a" exitCode=0 Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.174566 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerDied","Data":"1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a"} Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.174587 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerStarted","Data":"8df2e5f01cc186fb359a8d0d3221245c1f2782daa8511af918f436152f91dc1a"} Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.176703 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-g8jhd" event={"ID":"368e635e-0e63-4202-b9e4-4a3a85c6f30c","Type":"ContainerStarted","Data":"f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f"} Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.176753 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-g8jhd" event={"ID":"368e635e-0e63-4202-b9e4-4a3a85c6f30c","Type":"ContainerStarted","Data":"99174b50078cd2e17d6bd153f068db25fdfd683389b015d06b32d66db05165b1"} Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.186629 
4881 generic.go:334] "Generic (PLEG): container finished" podID="afd0cc21-e31c-47c9-a598-cd93dde96121" containerID="44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601" exitCode=0 Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.186694 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" event={"ID":"afd0cc21-e31c-47c9-a598-cd93dde96121","Type":"ContainerDied","Data":"44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601"} Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.186729 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" event={"ID":"afd0cc21-e31c-47c9-a598-cd93dde96121","Type":"ContainerStarted","Data":"a963cd918efa9c5d38dfe25eeb4bb8c62ec96c9d7de8bb5d1c500526727d0b68"} Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.191653 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.219842 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.233716 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.244938 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.257212 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.270777 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.280417 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.295708 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.309369 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.320923 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.343941 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.370475 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"20
25-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.397325 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.421988 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.442853 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\
"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.536837 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.580444 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.597117 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc 
kubenswrapper[4881]: I1211 08:16:22.616552 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.638374 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-re
sources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.647390 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.663115 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.681028 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.694792 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.732123 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:22 crc kubenswrapper[4881]: I1211 08:16:22.771687 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.005107 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.005170 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:23 crc kubenswrapper[4881]: E1211 08:16:23.005267 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:16:23 crc kubenswrapper[4881]: E1211 08:16:23.005489 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.021716 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/st
atic-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.037523 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.056653 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.074168 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.090616 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.105437 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.125605 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.140142 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.151893 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.165105 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.178527 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.191618 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerStarted","Data":"5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5"} Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.191716 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerStarted","Data":"5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea"} Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.193781 4881 generic.go:334] "Generic (PLEG): container finished" podID="afd0cc21-e31c-47c9-a598-cd93dde96121" containerID="18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a" exitCode=0 Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.193874 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" event={"ID":"afd0cc21-e31c-47c9-a598-cd93dde96121","Type":"ContainerDied","Data":"18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a"} Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.203543 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers 
with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tunin
g/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.218948 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.231949 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f
910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.250156 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.268397 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.283372 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.297148 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.310055 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.329944 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.341692 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.357981 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.372245 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.384866 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.401459 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.412048 4881 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.413804 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.413834 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.413843 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.413930 4881 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.418674 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/r
un/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.420068 4881 kubelet_node_status.go:115] "Node was previously registered" node="crc" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.420294 4881 kubelet_node_status.go:79] "Successfully registered node" node="crc" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.421204 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.421230 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.421240 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.421255 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.421265 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:23Z","lastTransitionTime":"2025-12-11T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:23 crc kubenswrapper[4881]: E1211 08:16:23.441102 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.445181 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.445217 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.445231 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.445249 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.445260 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:23Z","lastTransitionTime":"2025-12-11T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:23 crc kubenswrapper[4881]: E1211 08:16:23.461144 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.464749 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.464792 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.464803 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.464821 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.464831 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:23Z","lastTransitionTime":"2025-12-11T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:23 crc kubenswrapper[4881]: E1211 08:16:23.475995 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.479187 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.479235 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.479245 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.479264 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.479274 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:23Z","lastTransitionTime":"2025-12-11T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:23 crc kubenswrapper[4881]: E1211 08:16:23.489872 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.492934 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.492964 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.492974 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.492992 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.493004 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:23Z","lastTransitionTime":"2025-12-11T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:23 crc kubenswrapper[4881]: E1211 08:16:23.508187 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:23 crc kubenswrapper[4881]: E1211 08:16:23.508431 4881 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.510399 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.510444 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.510457 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.510475 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.510489 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:23Z","lastTransitionTime":"2025-12-11T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.613271 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.613329 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.613373 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.613397 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.613415 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:23Z","lastTransitionTime":"2025-12-11T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.716409 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.716855 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.716878 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.716905 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.716923 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:23Z","lastTransitionTime":"2025-12-11T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.820538 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.820611 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.820638 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.820696 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.820720 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:23Z","lastTransitionTime":"2025-12-11T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.923011 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.923083 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.923102 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.923128 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:23 crc kubenswrapper[4881]: I1211 08:16:23.923146 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:23Z","lastTransitionTime":"2025-12-11T08:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.005165 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:24 crc kubenswrapper[4881]: E1211 08:16:24.005377 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.026211 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.026280 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.026300 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.026325 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.026381 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:24Z","lastTransitionTime":"2025-12-11T08:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.129668 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.129749 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.129771 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.129801 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.129827 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:24Z","lastTransitionTime":"2025-12-11T08:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.200772 4881 generic.go:334] "Generic (PLEG): container finished" podID="afd0cc21-e31c-47c9-a598-cd93dde96121" containerID="a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584" exitCode=0 Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.200854 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" event={"ID":"afd0cc21-e31c-47c9-a598-cd93dde96121","Type":"ContainerDied","Data":"a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584"} Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.207651 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerStarted","Data":"c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0"} Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.207723 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerStarted","Data":"6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6"} Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.207744 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerStarted","Data":"3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b"} Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.207763 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerStarted","Data":"3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079"} Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.228860 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.234282 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.234355 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.234372 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.234393 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.234406 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:24Z","lastTransitionTime":"2025-12-11T08:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.254307 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.275272 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.297057 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z 
is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.313110 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernet
es/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.325540 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.337769 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.337834 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.337851 4881 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.337877 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.337896 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:24Z","lastTransitionTime":"2025-12-11T08:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.338022 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.359135 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.372554 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-jzsv5"] Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.373021 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-jzsv5" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.376001 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursi
veReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.376461 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.376734 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.376775 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.376927 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.392968 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.409690 4881 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.412086 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/aacce2d2-7fd3-439a-b46b-51858d3de240-serviceca\") pod \"node-ca-jzsv5\" (UID: \"aacce2d2-7fd3-439a-b46b-51858d3de240\") " pod="openshift-image-registry/node-ca-jzsv5" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.412118 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlbfj\" (UniqueName: \"kubernetes.io/projected/aacce2d2-7fd3-439a-b46b-51858d3de240-kube-api-access-tlbfj\") pod \"node-ca-jzsv5\" (UID: \"aacce2d2-7fd3-439a-b46b-51858d3de240\") " pod="openshift-image-registry/node-ca-jzsv5" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.412203 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/aacce2d2-7fd3-439a-b46b-51858d3de240-host\") pod \"node-ca-jzsv5\" (UID: \"aacce2d2-7fd3-439a-b46b-51858d3de240\") " pod="openshift-image-registry/node-ca-jzsv5" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.425033 4881 
status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.437832 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.440045 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.440072 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.440080 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.440094 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.440105 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:24Z","lastTransitionTime":"2025-12-11T08:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.451931 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.461533 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.472140 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.484741 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.498150 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.510393 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.512760 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/aacce2d2-7fd3-439a-b46b-51858d3de240-host\") pod \"node-ca-jzsv5\" (UID: \"aacce2d2-7fd3-439a-b46b-51858d3de240\") " pod="openshift-image-registry/node-ca-jzsv5" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.512814 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/aacce2d2-7fd3-439a-b46b-51858d3de240-serviceca\") pod \"node-ca-jzsv5\" (UID: \"aacce2d2-7fd3-439a-b46b-51858d3de240\") " pod="openshift-image-registry/node-ca-jzsv5" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.512834 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlbfj\" (UniqueName: \"kubernetes.io/projected/aacce2d2-7fd3-439a-b46b-51858d3de240-kube-api-access-tlbfj\") pod \"node-ca-jzsv5\" (UID: \"aacce2d2-7fd3-439a-b46b-51858d3de240\") " pod="openshift-image-registry/node-ca-jzsv5" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.513030 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/aacce2d2-7fd3-439a-b46b-51858d3de240-host\") pod \"node-ca-jzsv5\" (UID: \"aacce2d2-7fd3-439a-b46b-51858d3de240\") " pod="openshift-image-registry/node-ca-jzsv5" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.513853 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/aacce2d2-7fd3-439a-b46b-51858d3de240-serviceca\") pod \"node-ca-jzsv5\" (UID: \"aacce2d2-7fd3-439a-b46b-51858d3de240\") " pod="openshift-image-registry/node-ca-jzsv5" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.528604 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.530392 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlbfj\" (UniqueName: \"kubernetes.io/projected/aacce2d2-7fd3-439a-b46b-51858d3de240-kube-api-access-tlbfj\") pod \"node-ca-jzsv5\" (UID: \"aacce2d2-7fd3-439a-b46b-51858d3de240\") " pod="openshift-image-registry/node-ca-jzsv5" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.542797 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.542985 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.543094 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.543167 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.543231 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:24Z","lastTransitionTime":"2025-12-11T08:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns 
error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.557879 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\
\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\
":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.572588 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.589139 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.608999 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.613162 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:16:24 crc kubenswrapper[4881]: E1211 08:16:24.613298 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:16:32.613277886 +0000 UTC m=+40.990646583 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.626282 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\
"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.646044 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.646089 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.646181 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.646199 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.646215 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:24Z","lastTransitionTime":"2025-12-11T08:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.646873 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},
{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.658077 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:24Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.708641 4881 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-image-registry/node-ca-jzsv5" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.714813 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.714965 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:24 crc kubenswrapper[4881]: E1211 08:16:24.715053 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 08:16:24 crc kubenswrapper[4881]: E1211 08:16:24.715106 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 08:16:24 crc kubenswrapper[4881]: E1211 08:16:24.715136 4881 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:24 crc kubenswrapper[4881]: E1211 08:16:24.715145 4881 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.715283 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:24 crc kubenswrapper[4881]: E1211 08:16:24.715406 4881 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 08:16:24 crc kubenswrapper[4881]: E1211 08:16:24.715463 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:32.715402148 +0000 UTC m=+41.092770885 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:24 crc kubenswrapper[4881]: E1211 08:16:24.715511 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:32.715495231 +0000 UTC m=+41.092863968 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.715559 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:24 crc kubenswrapper[4881]: E1211 08:16:24.715655 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:32.715642504 +0000 UTC m=+41.093011241 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 08:16:24 crc kubenswrapper[4881]: E1211 08:16:24.715716 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 08:16:24 crc kubenswrapper[4881]: E1211 08:16:24.715750 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 08:16:24 crc kubenswrapper[4881]: E1211 08:16:24.715771 4881 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:24 crc kubenswrapper[4881]: E1211 08:16:24.715882 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:32.715856139 +0000 UTC m=+41.093224876 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:24 crc kubenswrapper[4881]: W1211 08:16:24.730904 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaacce2d2_7fd3_439a_b46b_51858d3de240.slice/crio-37bbe95a6019a90aa87a635a83803451b717ca4f9d03e958d5d2175fca831775 WatchSource:0}: Error finding container 37bbe95a6019a90aa87a635a83803451b717ca4f9d03e958d5d2175fca831775: Status 404 returned error can't find the container with id 37bbe95a6019a90aa87a635a83803451b717ca4f9d03e958d5d2175fca831775 Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.749484 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.749569 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.749595 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.749629 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.749652 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:24Z","lastTransitionTime":"2025-12-11T08:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.852071 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.852109 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.852120 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.852138 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.852149 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:24Z","lastTransitionTime":"2025-12-11T08:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.954961 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.955091 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.955187 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.955253 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:24 crc kubenswrapper[4881]: I1211 08:16:24.955311 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:24Z","lastTransitionTime":"2025-12-11T08:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.005050 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.005121 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:25 crc kubenswrapper[4881]: E1211 08:16:25.005655 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:16:25 crc kubenswrapper[4881]: E1211 08:16:25.005668 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.058912 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.058974 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.058993 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.059021 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.059041 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:25Z","lastTransitionTime":"2025-12-11T08:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.161649 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.162584 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.162638 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.162680 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.162702 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:25Z","lastTransitionTime":"2025-12-11T08:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.213905 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-jzsv5" event={"ID":"aacce2d2-7fd3-439a-b46b-51858d3de240","Type":"ContainerStarted","Data":"b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486"} Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.213990 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-jzsv5" event={"ID":"aacce2d2-7fd3-439a-b46b-51858d3de240","Type":"ContainerStarted","Data":"37bbe95a6019a90aa87a635a83803451b717ca4f9d03e958d5d2175fca831775"} Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.218063 4881 generic.go:334] "Generic (PLEG): container finished" podID="afd0cc21-e31c-47c9-a598-cd93dde96121" containerID="7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210" exitCode=0 Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.218431 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" event={"ID":"afd0cc21-e31c-47c9-a598-cd93dde96121","Type":"ContainerDied","Data":"7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210"} Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.231501 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.247234 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.266140 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.266201 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.266217 4881 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.266242 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.266259 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:25Z","lastTransitionTime":"2025-12-11T08:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.267754 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\"
,\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.285942 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.299380 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.320607 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"n
ame\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.335110 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.350020 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.365557 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z"
Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.369642 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.369672 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.369682 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.369701 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.369711 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:25Z","lastTransitionTime":"2025-12-11T08:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.387491 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.403278 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.414346 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"202
5-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.425439 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.437972 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.450385 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.463438 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.479102 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.479140 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.479150 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.479165 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.479174 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:25Z","lastTransitionTime":"2025-12-11T08:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.480262 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.496244 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.510431 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.521379 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.532663 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageI
D\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.542224 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.551636 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.563392 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.573829 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.582006 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.582044 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.582052 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.582070 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.582082 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:25Z","lastTransitionTime":"2025-12-11T08:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.585306 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.595181 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.610657 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:25Z 
is after 2025-08-24T17:21:41Z" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.684751 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.684801 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.684818 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.684838 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.684850 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:25Z","lastTransitionTime":"2025-12-11T08:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.787738 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.787781 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.787791 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.787812 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.787831 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:25Z","lastTransitionTime":"2025-12-11T08:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.890636 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.890697 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.890711 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.890731 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.890745 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:25Z","lastTransitionTime":"2025-12-11T08:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.993389 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.993440 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.993451 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.993470 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:25 crc kubenswrapper[4881]: I1211 08:16:25.993481 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:25Z","lastTransitionTime":"2025-12-11T08:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.004765 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:26 crc kubenswrapper[4881]: E1211 08:16:26.004963 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.096386 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.096433 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.096442 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.096459 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.096471 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:26Z","lastTransitionTime":"2025-12-11T08:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.199282 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.199329 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.199360 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.199377 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.199389 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:26Z","lastTransitionTime":"2025-12-11T08:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.225076 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerStarted","Data":"88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28"} Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.227802 4881 generic.go:334] "Generic (PLEG): container finished" podID="afd0cc21-e31c-47c9-a598-cd93dde96121" containerID="bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494" exitCode=0 Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.227908 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" event={"ID":"afd0cc21-e31c-47c9-a598-cd93dde96121","Type":"ContainerDied","Data":"bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494"} Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.247803 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:26Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.264100 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/
host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"sta
rted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:26Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.276299 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:26Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.290505 4881 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:26Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.301650 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.301692 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.301701 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.301717 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.301731 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:26Z","lastTransitionTime":"2025-12-11T08:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.303179 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:26Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.313149 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:26Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.323738 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:26Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.335135 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:26Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.351559 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:26Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.362801 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:26Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.379309 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:26Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.391270 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-
kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:26Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.403838 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.403869 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.403880 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.403895 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.403904 4881 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:26Z","lastTransitionTime":"2025-12-11T08:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.403958 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:26Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.416285 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:26Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.508816 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.508865 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.508874 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.508887 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.508901 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:26Z","lastTransitionTime":"2025-12-11T08:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.611816 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.611865 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.611878 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.611909 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.611922 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:26Z","lastTransitionTime":"2025-12-11T08:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.714180 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.714231 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.714246 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.714268 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.714282 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:26Z","lastTransitionTime":"2025-12-11T08:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.816791 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.816838 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.816850 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.816865 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.816876 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:26Z","lastTransitionTime":"2025-12-11T08:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.919625 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.919669 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.919681 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.919701 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:26 crc kubenswrapper[4881]: I1211 08:16:26.919714 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:26Z","lastTransitionTime":"2025-12-11T08:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.005207 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.005209 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:27 crc kubenswrapper[4881]: E1211 08:16:27.005377 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:16:27 crc kubenswrapper[4881]: E1211 08:16:27.005543 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.022161 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.022222 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.022245 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.022275 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.022299 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:27Z","lastTransitionTime":"2025-12-11T08:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.124844 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.124890 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.124901 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.124919 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.124933 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:27Z","lastTransitionTime":"2025-12-11T08:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.227615 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.227672 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.227693 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.227713 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.227732 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:27Z","lastTransitionTime":"2025-12-11T08:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.234520 4881 generic.go:334] "Generic (PLEG): container finished" podID="afd0cc21-e31c-47c9-a598-cd93dde96121" containerID="8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129" exitCode=0 Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.234567 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" event={"ID":"afd0cc21-e31c-47c9-a598-cd93dde96121","Type":"ContainerDied","Data":"8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129"} Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.249601 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:27Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.262785 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:27Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.274679 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:27Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.290433 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\
"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413
f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:27Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.302158 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:27Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.315414 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:27Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.328220 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:27Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.329847 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.329881 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.329897 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.329916 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.329927 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:27Z","lastTransitionTime":"2025-12-11T08:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.338807 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:27Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.350898 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:27Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.365677 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:27Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.377404 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:27Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.387592 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:27Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.406157 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:27Z 
is after 2025-08-24T17:21:41Z" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.418674 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernet
es/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:27Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.432368 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.432401 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.432411 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.432424 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.432435 4881 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:27Z","lastTransitionTime":"2025-12-11T08:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.535232 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.535587 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.535600 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.535622 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.535635 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:27Z","lastTransitionTime":"2025-12-11T08:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.638666 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.638899 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.638909 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.638927 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.638938 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:27Z","lastTransitionTime":"2025-12-11T08:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.743059 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.743108 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.743120 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.743140 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.743153 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:27Z","lastTransitionTime":"2025-12-11T08:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.846180 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.846223 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.846237 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.846258 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.846271 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:27Z","lastTransitionTime":"2025-12-11T08:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.949693 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.949736 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.949747 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.949763 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:27 crc kubenswrapper[4881]: I1211 08:16:27.949773 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:27Z","lastTransitionTime":"2025-12-11T08:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.004541 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:28 crc kubenswrapper[4881]: E1211 08:16:28.004694 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.052416 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.052458 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.052470 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.052485 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.052498 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:28Z","lastTransitionTime":"2025-12-11T08:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.155458 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.155518 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.155535 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.155559 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.155581 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:28Z","lastTransitionTime":"2025-12-11T08:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.243619 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerStarted","Data":"0c9d06c04708f285b468dcbb0b0406b77e16ebe289cefb4cb0fb5d9dc84e3dc1"} Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.243972 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.244041 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.244069 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.250103 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" event={"ID":"afd0cc21-e31c-47c9-a598-cd93dde96121","Type":"ContainerStarted","Data":"0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5"} Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.258485 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.258536 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.258551 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.258576 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.258593 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:28Z","lastTransitionTime":"2025-12-11T08:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.263781 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.273868 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:28 crc 
kubenswrapper[4881]: I1211 08:16:28.274324 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.279506 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.291929 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.304819 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\
"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413
f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.314792 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.327108 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.341760 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.352697 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.363428 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.363489 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.363510 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.363535 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.363550 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:28Z","lastTransitionTime":"2025-12-11T08:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.368200 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.380544 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.401719 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.413168 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.431582 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\
"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c9d06c04708f285b468dcbb0b0406b77e16ebe289cefb4cb0fb5d9dc84e3dc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.446031 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.458533 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.464964 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.464988 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.464996 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.465012 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.465022 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:28Z","lastTransitionTime":"2025-12-11T08:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.471083 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.483696 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.497319 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12
-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.511037 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.525995 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.547669 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.557163 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.566662 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.566709 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.566720 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.566738 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.566751 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:28Z","lastTransitionTime":"2025-12-11T08:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.568570 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.580243 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.590892 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.607403 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c9d06c04708f285b468dcbb0b0406b77e16ebe289cefb4cb0fb5d9dc84e3dc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.619238 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.635238 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:28Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.669217 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.669290 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.669314 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.669380 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.669406 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:28Z","lastTransitionTime":"2025-12-11T08:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.772131 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.772205 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.772227 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.772257 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.772328 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:28Z","lastTransitionTime":"2025-12-11T08:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.875526 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.875582 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.875603 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.875631 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:28 crc kubenswrapper[4881]: I1211 08:16:28.875651 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:28Z","lastTransitionTime":"2025-12-11T08:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.036006 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.036038 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.036088 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.036127 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.036141 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.036169 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.036182 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:29Z","lastTransitionTime":"2025-12-11T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:29 crc kubenswrapper[4881]: E1211 08:16:29.036322 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:16:29 crc kubenswrapper[4881]: E1211 08:16:29.039123 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.139527 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.139567 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.139576 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.139591 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.139600 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:29Z","lastTransitionTime":"2025-12-11T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.242284 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.242357 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.242374 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.242393 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.242405 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:29Z","lastTransitionTime":"2025-12-11T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.345408 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.345942 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.346010 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.346080 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.346142 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:29Z","lastTransitionTime":"2025-12-11T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.448933 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.448993 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.449016 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.449045 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.449065 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:29Z","lastTransitionTime":"2025-12-11T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.551591 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.551620 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.551629 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.551642 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.551650 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:29Z","lastTransitionTime":"2025-12-11T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.654137 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.654215 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.654240 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.654274 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.654297 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:29Z","lastTransitionTime":"2025-12-11T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.757557 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.757617 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.757634 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.757662 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.757680 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:29Z","lastTransitionTime":"2025-12-11T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.861495 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.861560 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.861576 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.861600 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.861616 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:29Z","lastTransitionTime":"2025-12-11T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.964144 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.964199 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.964215 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.964240 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:29 crc kubenswrapper[4881]: I1211 08:16:29.964257 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:29Z","lastTransitionTime":"2025-12-11T08:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.004964 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:30 crc kubenswrapper[4881]: E1211 08:16:30.005207 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.067090 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.067174 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.067196 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.067222 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.067240 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:30Z","lastTransitionTime":"2025-12-11T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.170021 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.170054 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.170065 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.170083 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.170095 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:30Z","lastTransitionTime":"2025-12-11T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.271777 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.271817 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.271829 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.271843 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.271855 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:30Z","lastTransitionTime":"2025-12-11T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.377760 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.377824 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.377842 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.377865 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.377897 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:30Z","lastTransitionTime":"2025-12-11T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.480255 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.480289 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.480300 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.480316 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.480326 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:30Z","lastTransitionTime":"2025-12-11T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.583056 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.583094 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.583103 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.583118 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.583128 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:30Z","lastTransitionTime":"2025-12-11T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.685776 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.685817 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.685827 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.685843 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.685853 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:30Z","lastTransitionTime":"2025-12-11T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.788453 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.788500 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.788509 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.788526 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.788538 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:30Z","lastTransitionTime":"2025-12-11T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.891758 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.891834 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.891856 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.891886 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.891926 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:30Z","lastTransitionTime":"2025-12-11T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.994425 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.994485 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.994494 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.994521 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:30 crc kubenswrapper[4881]: I1211 08:16:30.994530 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:30Z","lastTransitionTime":"2025-12-11T08:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.004778 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.004875 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:31 crc kubenswrapper[4881]: E1211 08:16:31.004920 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:16:31 crc kubenswrapper[4881]: E1211 08:16:31.005028 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.097731 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.097787 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.097810 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.097836 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.097853 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:31Z","lastTransitionTime":"2025-12-11T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.201428 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.201482 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.201491 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.201516 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.201536 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:31Z","lastTransitionTime":"2025-12-11T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.304767 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.304810 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.304822 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.304839 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.304849 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:31Z","lastTransitionTime":"2025-12-11T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.407799 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.407847 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.407863 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.407886 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.407903 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:31Z","lastTransitionTime":"2025-12-11T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.510039 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.510073 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.510082 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.510094 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.510118 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:31Z","lastTransitionTime":"2025-12-11T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.612690 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.612748 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.612757 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.612834 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.612871 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:31Z","lastTransitionTime":"2025-12-11T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.715948 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.716002 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.716011 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.716028 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.716037 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:31Z","lastTransitionTime":"2025-12-11T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.819595 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.819679 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.819700 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.819725 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.819742 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:31Z","lastTransitionTime":"2025-12-11T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.922300 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.922406 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.922425 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.922488 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:31 crc kubenswrapper[4881]: I1211 08:16:31.922508 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:31Z","lastTransitionTime":"2025-12-11T08:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.004468 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:32 crc kubenswrapper[4881]: E1211 08:16:32.004623 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.026358 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.026421 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.026431 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.026452 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.026465 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:32Z","lastTransitionTime":"2025-12-11T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.130071 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.130118 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.130178 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.130199 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.130211 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:32Z","lastTransitionTime":"2025-12-11T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.233817 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.233909 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.233934 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.233965 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.233988 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:32Z","lastTransitionTime":"2025-12-11T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.275631 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovnkube-controller/0.log" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.281115 4881 generic.go:334] "Generic (PLEG): container finished" podID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerID="0c9d06c04708f285b468dcbb0b0406b77e16ebe289cefb4cb0fb5d9dc84e3dc1" exitCode=1 Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.281188 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerDied","Data":"0c9d06c04708f285b468dcbb0b0406b77e16ebe289cefb4cb0fb5d9dc84e3dc1"} Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.282308 4881 scope.go:117] "RemoveContainer" containerID="0c9d06c04708f285b468dcbb0b0406b77e16ebe289cefb4cb0fb5d9dc84e3dc1" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.298262 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:32Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.317910 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:32Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.334480 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:32Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.336510 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.336567 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.336585 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.336605 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.336620 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:32Z","lastTransitionTime":"2025-12-11T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.354093 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:32Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.369031 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:32Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.383175 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:32Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.394008 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:32Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.404494 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:32Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.417485 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:32Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.427407 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:32Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.440029 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.440125 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.440144 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.440165 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.440183 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:32Z","lastTransitionTime":"2025-12-11T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.442762 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:32Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.468254 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c9d06c04708f285b468dcbb0b0406b77e16ebe289cefb4cb0fb5d9dc84e3dc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0c9d06c04708f285b468dcbb0b0406b77e16ebe289cefb4cb0fb5d9dc84e3dc1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:31Z\\\",\\\"message\\\":\\\" (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679255 6216 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679430 6216 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1211 08:16:31.679547 6216 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679591 6216 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1211 08:16:31.679687 6216 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679942 6216 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1211 08:16:31.680464 6216 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.680515 6216 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.680865 6216 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099
482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:32Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.489031 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster
-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:32Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.507061 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:32Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.543406 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.543449 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.543458 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.543472 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.543482 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:32Z","lastTransitionTime":"2025-12-11T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.646397 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.646446 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.646462 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.646480 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.646491 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:32Z","lastTransitionTime":"2025-12-11T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.701458 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:16:32 crc kubenswrapper[4881]: E1211 08:16:32.701745 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:16:48.701709099 +0000 UTC m=+57.079077796 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.749653 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.749704 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.749715 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.749733 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.749744 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:32Z","lastTransitionTime":"2025-12-11T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.802807 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.802855 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.802888 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.802908 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:32 crc kubenswrapper[4881]: E1211 08:16:32.802982 4881 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object 
"openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 08:16:32 crc kubenswrapper[4881]: E1211 08:16:32.803061 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 08:16:32 crc kubenswrapper[4881]: E1211 08:16:32.803103 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 08:16:32 crc kubenswrapper[4881]: E1211 08:16:32.803125 4881 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:32 crc kubenswrapper[4881]: E1211 08:16:32.803061 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 08:16:32 crc kubenswrapper[4881]: E1211 08:16:32.803200 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 08:16:32 crc kubenswrapper[4881]: E1211 08:16:32.803214 4881 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:32 crc kubenswrapper[4881]: E1211 08:16:32.803079 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:48.803060572 +0000 UTC m=+57.180429269 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 08:16:32 crc kubenswrapper[4881]: E1211 08:16:32.802987 4881 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 08:16:32 crc kubenswrapper[4881]: E1211 08:16:32.803315 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:48.803283678 +0000 UTC m=+57.180652385 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:32 crc kubenswrapper[4881]: E1211 08:16:32.803352 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:48.803326839 +0000 UTC m=+57.180695546 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:16:32 crc kubenswrapper[4881]: E1211 08:16:32.803385 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:16:48.80337231 +0000 UTC m=+57.180741017 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.857743 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.857815 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.857831 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.857864 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.857878 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:32Z","lastTransitionTime":"2025-12-11T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.961857 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.961919 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.961932 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.961955 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:32 crc kubenswrapper[4881]: I1211 08:16:32.961971 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:32Z","lastTransitionTime":"2025-12-11T08:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.004763 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.004840 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:33 crc kubenswrapper[4881]: E1211 08:16:33.005028 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:16:33 crc kubenswrapper[4881]: E1211 08:16:33.005183 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.025616 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.044097 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.064453 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.064492 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.064504 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.064521 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.064532 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:33Z","lastTransitionTime":"2025-12-11T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.065209 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c9d06c04708f285b468dcbb0b0406b77e16ebe289cefb4cb0fb5d9dc84e3dc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0c9d06c04708f285b468dcbb0b0406b77e16ebe289cefb4cb0fb5d9dc84e3dc1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:31Z\\\",\\\"message\\\":\\\" (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679255 6216 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679430 6216 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1211 08:16:31.679547 6216 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679591 6216 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1211 08:16:31.679687 6216 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679942 6216 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1211 08:16:31.680464 6216 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.680515 6216 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.680865 6216 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099
482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.079754 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster
-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.092201 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.106734 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.120082 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cn
i/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.137517 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11
\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:1
6:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6
173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.151224 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975
569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.166004 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.166245 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.166299 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.166315 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.166360 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.166377 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:33Z","lastTransitionTime":"2025-12-11T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.181741 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.195873 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.208277 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.220737 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.269825 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.269883 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.269893 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.269913 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.269925 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:33Z","lastTransitionTime":"2025-12-11T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.286915 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovnkube-controller/0.log" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.291469 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerStarted","Data":"58ff0c7d416fda4200920825cedb24d309b747147b3e209166f99140da0e11d4"} Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.291980 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.308524 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.323304 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.335973 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.369573 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ff0c7d416fda4200920825cedb24d309b74714
7b3e209166f99140da0e11d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0c9d06c04708f285b468dcbb0b0406b77e16ebe289cefb4cb0fb5d9dc84e3dc1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:31Z\\\",\\\"message\\\":\\\" (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679255 6216 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679430 6216 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1211 08:16:31.679547 6216 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679591 6216 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1211 08:16:31.679687 6216 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679942 6216 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1211 08:16:31.680464 6216 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.680515 6216 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.680865 6216 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.372847 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.372896 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.372910 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.372931 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.372945 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:33Z","lastTransitionTime":"2025-12-11T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.392198 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.408794 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.428558 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.444472 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.470353 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.474995 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.475038 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.475052 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.475070 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.475082 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:33Z","lastTransitionTime":"2025-12-11T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.484609 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/o
penshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.498577 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.509399 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.520996 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.535061 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.577411 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.577457 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.577466 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.577481 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.577491 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:33Z","lastTransitionTime":"2025-12-11T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.679719 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.679768 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.679781 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.679800 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.679846 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:33Z","lastTransitionTime":"2025-12-11T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.782449 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.782493 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.782506 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.782523 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.782535 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:33Z","lastTransitionTime":"2025-12-11T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.884898 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.884950 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.884969 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.884993 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.885009 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:33Z","lastTransitionTime":"2025-12-11T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.909107 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.909148 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.909156 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.909167 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.909175 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:33Z","lastTransitionTime":"2025-12-11T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:33 crc kubenswrapper[4881]: E1211 08:16:33.922839 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.927427 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.927526 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.927584 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.927666 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.927740 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:33Z","lastTransitionTime":"2025-12-11T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:33 crc kubenswrapper[4881]: E1211 08:16:33.941062 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.945154 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.945218 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.945240 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.945265 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.945285 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:33Z","lastTransitionTime":"2025-12-11T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:33 crc kubenswrapper[4881]: E1211 08:16:33.959188 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.962817 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.962907 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.962965 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.963039 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.963125 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:33Z","lastTransitionTime":"2025-12-11T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:33 crc kubenswrapper[4881]: E1211 08:16:33.979818 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.984389 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.984438 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.984452 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.984476 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:33 crc kubenswrapper[4881]: I1211 08:16:33.984497 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:33Z","lastTransitionTime":"2025-12-11T08:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.005238 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:34 crc kubenswrapper[4881]: E1211 08:16:34.005068 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{...
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:34Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:34 crc kubenswrapper[4881]: E1211 08:16:34.005309 4881 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 08:16:34 crc kubenswrapper[4881]: E1211 08:16:34.005434 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.009305 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.009415 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.009443 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.009478 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.009505 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:34Z","lastTransitionTime":"2025-12-11T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.113707 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.113771 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.113788 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.113860 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.113880 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:34Z","lastTransitionTime":"2025-12-11T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.217093 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.217163 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.217183 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.217210 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.217229 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:34Z","lastTransitionTime":"2025-12-11T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.320462 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.320523 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.320542 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.320568 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.320587 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:34Z","lastTransitionTime":"2025-12-11T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.423525 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.423579 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.423591 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.423612 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.423626 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:34Z","lastTransitionTime":"2025-12-11T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.494113 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r"] Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.494823 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.496934 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.497437 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.519749 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},
{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:34Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.525717 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.525752 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.525763 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.525780 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.525793 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:34Z","lastTransitionTime":"2025-12-11T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.534945 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:34Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.552998 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:34Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.576291 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ff0c7d416fda4200920825cedb24d309b747147b3e209166f99140da0e11d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0c9d06c04708f285b468dcbb0b0406b77e16ebe289cefb4cb0fb5d9dc84e3dc1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:31Z\\\",\\\"message\\\":\\\" (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679255 6216 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679430 6216 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1211 08:16:31.679547 6216 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679591 6216 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1211 08:16:31.679687 6216 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679942 6216 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1211 08:16:31.680464 6216 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.680515 6216 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.680865 6216 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:34Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.588798 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:34Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.602150 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:34Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.613888 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:34Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.621742 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/71774618-27c8-499d-83d9-e88693c86758-env-overrides\") pod \"ovnkube-control-plane-749d76644c-kqw5r\" (UID: \"71774618-27c8-499d-83d9-e88693c86758\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.621768 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/71774618-27c8-499d-83d9-e88693c86758-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-kqw5r\" (UID: \"71774618-27c8-499d-83d9-e88693c86758\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.621816 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/71774618-27c8-499d-83d9-e88693c86758-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-kqw5r\" (UID: \"71774618-27c8-499d-83d9-e88693c86758\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.621838 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6btfk\" (UniqueName: \"kubernetes.io/projected/71774618-27c8-499d-83d9-e88693c86758-kube-api-access-6btfk\") pod \"ovnkube-control-plane-749d76644c-kqw5r\" (UID: \"71774618-27c8-499d-83d9-e88693c86758\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.624883 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:34Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.628569 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.628619 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.628632 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.628649 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.628660 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:34Z","lastTransitionTime":"2025-12-11T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.638792 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:34Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.649057 4881 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:34Z is after 2025-08-24T17:21:41Z" Dec 11 
08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.659835 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:34Z is after 
2025-08-24T17:21:41Z" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.670018 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:34Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.681165 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:34Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.694295 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:34Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.706143 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:34Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.722763 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/71774618-27c8-499d-83d9-e88693c86758-env-overrides\") pod \"ovnkube-control-plane-749d76644c-kqw5r\" (UID: \"71774618-27c8-499d-83d9-e88693c86758\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.723407 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/71774618-27c8-499d-83d9-e88693c86758-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-kqw5r\" (UID: \"71774618-27c8-499d-83d9-e88693c86758\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.723324 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/71774618-27c8-499d-83d9-e88693c86758-env-overrides\") pod \"ovnkube-control-plane-749d76644c-kqw5r\" (UID: \"71774618-27c8-499d-83d9-e88693c86758\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.723507 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/71774618-27c8-499d-83d9-e88693c86758-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-kqw5r\" (UID: \"71774618-27c8-499d-83d9-e88693c86758\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.724251 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6btfk\" (UniqueName: \"kubernetes.io/projected/71774618-27c8-499d-83d9-e88693c86758-kube-api-access-6btfk\") pod \"ovnkube-control-plane-749d76644c-kqw5r\" (UID: \"71774618-27c8-499d-83d9-e88693c86758\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.724155 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/71774618-27c8-499d-83d9-e88693c86758-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-kqw5r\" (UID: \"71774618-27c8-499d-83d9-e88693c86758\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.730697 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/71774618-27c8-499d-83d9-e88693c86758-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-kqw5r\" (UID: \"71774618-27c8-499d-83d9-e88693c86758\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.730855 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.730886 4881 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.730898 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.730918 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.730930 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:34Z","lastTransitionTime":"2025-12-11T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.739551 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6btfk\" (UniqueName: \"kubernetes.io/projected/71774618-27c8-499d-83d9-e88693c86758-kube-api-access-6btfk\") pod \"ovnkube-control-plane-749d76644c-kqw5r\" (UID: \"71774618-27c8-499d-83d9-e88693c86758\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.808846 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" Dec 11 08:16:34 crc kubenswrapper[4881]: W1211 08:16:34.820712 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71774618_27c8_499d_83d9_e88693c86758.slice/crio-830612a5e83bb4f483c900f1473f6c9a60d30dcf42ef7b75cf6a84c089766bf5 WatchSource:0}: Error finding container 830612a5e83bb4f483c900f1473f6c9a60d30dcf42ef7b75cf6a84c089766bf5: Status 404 returned error can't find the container with id 830612a5e83bb4f483c900f1473f6c9a60d30dcf42ef7b75cf6a84c089766bf5 Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.833179 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.833216 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.833227 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.833243 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.833255 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:34Z","lastTransitionTime":"2025-12-11T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.936415 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.936467 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.936476 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.936495 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:34 crc kubenswrapper[4881]: I1211 08:16:34.936505 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:34Z","lastTransitionTime":"2025-12-11T08:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.004378 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:35 crc kubenswrapper[4881]: E1211 08:16:35.004532 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.004615 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:35 crc kubenswrapper[4881]: E1211 08:16:35.004776 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.039421 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.039462 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.039475 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.039494 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.039506 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:35Z","lastTransitionTime":"2025-12-11T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.142446 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.142483 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.142495 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.142513 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.142525 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:35Z","lastTransitionTime":"2025-12-11T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.232824 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-bzslm"] Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.233526 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:16:35 crc kubenswrapper[4881]: E1211 08:16:35.233661 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.245029 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.245099 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.245120 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.245147 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.245164 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:35Z","lastTransitionTime":"2025-12-11T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.247278 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a
8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.266724 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.283284 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.296679 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.299511 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovnkube-controller/1.log" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.300126 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovnkube-controller/0.log" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.303001 4881 generic.go:334] "Generic (PLEG): container finished" podID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerID="58ff0c7d416fda4200920825cedb24d309b747147b3e209166f99140da0e11d4" exitCode=1 Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.303099 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerDied","Data":"58ff0c7d416fda4200920825cedb24d309b747147b3e209166f99140da0e11d4"} Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.303173 4881 scope.go:117] "RemoveContainer" containerID="0c9d06c04708f285b468dcbb0b0406b77e16ebe289cefb4cb0fb5d9dc84e3dc1" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.303866 4881 scope.go:117] "RemoveContainer" containerID="58ff0c7d416fda4200920825cedb24d309b747147b3e209166f99140da0e11d4" Dec 11 08:16:35 crc kubenswrapper[4881]: E1211 08:16:35.304022 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.305862 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" event={"ID":"71774618-27c8-499d-83d9-e88693c86758","Type":"ContainerStarted","Data":"c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244"} Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.305910 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" event={"ID":"71774618-27c8-499d-83d9-e88693c86758","Type":"ContainerStarted","Data":"136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615"} Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.305925 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" event={"ID":"71774618-27c8-499d-83d9-e88693c86758","Type":"ContainerStarted","Data":"830612a5e83bb4f483c900f1473f6c9a60d30dcf42ef7b75cf6a84c089766bf5"} Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.310897 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.324817 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.331090 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs\") pod \"network-metrics-daemon-bzslm\" (UID: \"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\") " pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.331317 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k86hj\" (UniqueName: \"kubernetes.io/projected/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-kube-api-access-k86hj\") pod \"network-metrics-daemon-bzslm\" (UID: \"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\") " pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.333757 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.347567 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.347609 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.347618 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.347634 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.347643 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:35Z","lastTransitionTime":"2025-12-11T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.348447 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ff0c7d416fda4200920825cedb24d309b747147b3e209166f99140da0e11d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0c9d06c04708f285b468dcbb0b0406b77e16ebe289cefb4cb0fb5d9dc84e3dc1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:31Z\\\",\\\"message\\\":\\\" (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679255 6216 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679430 6216 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1211 08:16:31.679547 6216 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679591 6216 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1211 08:16:31.679687 6216 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679942 6216 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1211 08:16:31.680464 6216 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.680515 6216 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.680865 6216 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.361944 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.377456 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.391747 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.405813 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.425106 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.432927 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs\") pod \"network-metrics-daemon-bzslm\" (UID: \"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\") " pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.433173 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k86hj\" (UniqueName: \"kubernetes.io/projected/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-kube-api-access-k86hj\") pod \"network-metrics-daemon-bzslm\" (UID: \"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\") " pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:16:35 crc kubenswrapper[4881]: E1211 08:16:35.433208 4881 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 08:16:35 crc kubenswrapper[4881]: E1211 08:16:35.433354 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs podName:3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb nodeName:}" failed. No retries permitted until 2025-12-11 08:16:35.933306744 +0000 UTC m=+44.310675651 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs") pod "network-metrics-daemon-bzslm" (UID: "3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.439793 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08
:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.450088 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.450149 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.450163 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.450183 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.450214 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:35Z","lastTransitionTime":"2025-12-11T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.451852 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k86hj\" (UniqueName: \"kubernetes.io/projected/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-kube-api-access-k86hj\") pod \"network-metrics-daemon-bzslm\" (UID: \"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\") " pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.454289 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.466622 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.478695 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.492169 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.514033 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ff0c7d416fda4200920825cedb24d309b747147b3e209166f99140da0e11d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0c9d06c04708f285b468dcbb0b0406b77e16ebe289cefb4cb0fb5d9dc84e3dc1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:31Z\\\",\\\"message\\\":\\\" (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679255 6216 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679430 6216 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1211 08:16:31.679547 6216 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679591 6216 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1211 08:16:31.679687 6216 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679942 6216 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1211 08:16:31.680464 6216 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.680515 6216 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.680865 6216 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://58ff0c7d416fda4200920825cedb24d309b747147b3e209166f99140da0e11d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"message\\\":\\\".network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z]\\\\nI1211 08:16:33.855251 6356 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"2a3fb1a3-a476-4e14-bcf5-fb79af60206a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID
\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.529621 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.542395 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.553750 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.553811 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.553823 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.553843 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.553856 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:35Z","lastTransitionTime":"2025-12-11T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.555668 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.569842 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.584263 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.596080 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.610416 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\
\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.621701 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.635884 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.648913 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.657315 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.657432 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.657457 4881 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.657490 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.657515 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:35Z","lastTransitionTime":"2025-12-11T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.659768 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.672227 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.687370 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:35Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.760908 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.761017 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.761056 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.761089 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.761140 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:35Z","lastTransitionTime":"2025-12-11T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.864595 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.864700 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.864715 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.864738 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.864755 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:35Z","lastTransitionTime":"2025-12-11T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.938381 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs\") pod \"network-metrics-daemon-bzslm\" (UID: \"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\") " pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:16:35 crc kubenswrapper[4881]: E1211 08:16:35.938574 4881 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 08:16:35 crc kubenswrapper[4881]: E1211 08:16:35.938727 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs podName:3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb nodeName:}" failed. No retries permitted until 2025-12-11 08:16:36.938698182 +0000 UTC m=+45.316066879 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs") pod "network-metrics-daemon-bzslm" (UID: "3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.967929 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.967965 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.967974 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.967991 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:35 crc kubenswrapper[4881]: I1211 08:16:35.968003 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:35Z","lastTransitionTime":"2025-12-11T08:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.004924 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:36 crc kubenswrapper[4881]: E1211 08:16:36.005595 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.071675 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.071723 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.071733 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.071751 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.071767 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:36Z","lastTransitionTime":"2025-12-11T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.174852 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.174906 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.174917 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.174934 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.174946 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:36Z","lastTransitionTime":"2025-12-11T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.277553 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.277605 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.277617 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.277636 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.277649 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:36Z","lastTransitionTime":"2025-12-11T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.312601 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovnkube-controller/1.log" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.381466 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.381526 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.381547 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.381573 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.381590 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:36Z","lastTransitionTime":"2025-12-11T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.484357 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.484423 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.484437 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.484458 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.484473 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:36Z","lastTransitionTime":"2025-12-11T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.587906 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.587952 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.587964 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.587981 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.587993 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:36Z","lastTransitionTime":"2025-12-11T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.690482 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.690586 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.690610 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.690642 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.690665 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:36Z","lastTransitionTime":"2025-12-11T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.794084 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.794436 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.794462 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.794489 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.794506 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:36Z","lastTransitionTime":"2025-12-11T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.897941 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.898001 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.898012 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.898049 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.898064 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:36Z","lastTransitionTime":"2025-12-11T08:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:36 crc kubenswrapper[4881]: I1211 08:16:36.949997 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs\") pod \"network-metrics-daemon-bzslm\" (UID: \"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\") " pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:16:36 crc kubenswrapper[4881]: E1211 08:16:36.950213 4881 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 08:16:36 crc kubenswrapper[4881]: E1211 08:16:36.950301 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs podName:3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb nodeName:}" failed. No retries permitted until 2025-12-11 08:16:38.950276947 +0000 UTC m=+47.327645674 (durationBeforeRetry 2s). 
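
The two metrics-certs mount failures above show the kubelet's per-volume retry backoff doubling: the first retry is scheduled 1s out (durationBeforeRetry 1s, at m=+45.3), the next 2s out (durationBeforeRetry 2s, at m=+47.3). A simplified sketch of that doubling pattern follows; setUpVolume is a placeholder for the failing secret-volume setup, the cap and attempt limit are assumptions for the sketch, and the kubelet's real nestedpendingoperations bookkeeping is more involved:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // setUpVolume stands in for the secret-volume setup that fails above
    // while "openshift-multus"/"metrics-daemon-secret" is not registered.
    func setUpVolume() error {
        return errors.New(`object "openshift-multus"/"metrics-daemon-secret" not registered`)
    }

    func main() {
        delay := time.Second             // first retry delay seen above (durationBeforeRetry 1s)
        const maxDelay = 2 * time.Minute // cap is an assumption for this sketch
        for attempt := 1; attempt <= 5; attempt++ {
            err := setUpVolume()
            if err == nil {
                fmt.Println("mount succeeded")
                return
            }
            fmt.Printf("attempt %d failed: %v; no retries permitted until %s (durationBeforeRetry %s)\n",
                attempt, err, time.Now().Add(delay).Format(time.RFC3339), delay)
            time.Sleep(delay)
            if delay < maxDelay {
                delay *= 2 // 1s, 2s, 4s, ... matching the 1s -> 2s step above
            }
        }
    }

The doubling accounts for the growing gap between the m=+45.3 and m=+47.3 retry deadlines above; the sketch simply gives up after five tries, whereas the kubelet tracks the backoff per volume/pod pair and clears it once the mount finally succeeds.
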
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs") pod "network-metrics-daemon-bzslm" (UID: "3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.001576 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.001641 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.001659 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.001686 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.001705 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:37Z","lastTransitionTime":"2025-12-11T08:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.005555 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.005573 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:16:37 crc kubenswrapper[4881]: E1211 08:16:37.005717 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.005793 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:37 crc kubenswrapper[4881]: E1211 08:16:37.005934 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:16:37 crc kubenswrapper[4881]: E1211 08:16:37.006034 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.104783 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.105188 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.105368 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.105578 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.105780 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:37Z","lastTransitionTime":"2025-12-11T08:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.209527 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.209919 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.209997 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.210173 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.210247 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:37Z","lastTransitionTime":"2025-12-11T08:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.313578 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.313952 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.314074 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.314166 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:37 crc kubenswrapper[4881]: I1211 08:16:37.314246 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:37Z","lastTransitionTime":"2025-12-11T08:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[... the five-entry node-status block above repeats at ~100 ms intervals from 08:16:37.416968 to 08:16:42.702773, identical except for timestamps; those repeats are elided. The distinct entries interleaved in that window follow in timestamp order, and the final repeat (08:16:42.805794) is kept at the end. ...]
Dec 11 08:16:38 crc kubenswrapper[4881]: I1211 08:16:38.005387 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 08:16:38 crc kubenswrapper[4881]: E1211 08:16:38.005615 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 11 08:16:38 crc kubenswrapper[4881]: I1211 08:16:38.973066 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs\") pod \"network-metrics-daemon-bzslm\" (UID: \"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\") " pod="openshift-multus/network-metrics-daemon-bzslm"
Dec 11 08:16:38 crc kubenswrapper[4881]: E1211 08:16:38.973236 4881 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Dec 11 08:16:38 crc kubenswrapper[4881]: E1211 08:16:38.973381 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs podName:3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb nodeName:}" failed. No retries permitted until 2025-12-11 08:16:42.973317156 +0000 UTC m=+51.350685883 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs") pod "network-metrics-daemon-bzslm" (UID: "3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb") : object "openshift-multus"/"metrics-daemon-secret" not registered
Dec 11 08:16:39 crc kubenswrapper[4881]: I1211 08:16:39.004696 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm"
Dec 11 08:16:39 crc kubenswrapper[4881]: I1211 08:16:39.004696 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 08:16:39 crc kubenswrapper[4881]: E1211 08:16:39.004911 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb"
Dec 11 08:16:39 crc kubenswrapper[4881]: E1211 08:16:39.005004 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 08:16:39 crc kubenswrapper[4881]: I1211 08:16:39.005614 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:16:39 crc kubenswrapper[4881]: E1211 08:16:39.006164 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 11 08:16:40 crc kubenswrapper[4881]: I1211 08:16:40.004709 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 08:16:40 crc kubenswrapper[4881]: E1211 08:16:40.004910 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 11 08:16:41 crc kubenswrapper[4881]: I1211 08:16:41.005699 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm"
Dec 11 08:16:41 crc kubenswrapper[4881]: I1211 08:16:41.005757 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 08:16:41 crc kubenswrapper[4881]: E1211 08:16:41.006023 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb"
Dec 11 08:16:41 crc kubenswrapper[4881]: I1211 08:16:41.006050 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:16:41 crc kubenswrapper[4881]: E1211 08:16:41.006285 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 08:16:41 crc kubenswrapper[4881]: E1211 08:16:41.006449 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 11 08:16:42 crc kubenswrapper[4881]: I1211 08:16:42.004896 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 08:16:42 crc kubenswrapper[4881]: E1211 08:16:42.005200 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 11 08:16:42 crc kubenswrapper[4881]: I1211 08:16:42.805794 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:42 crc kubenswrapper[4881]: I1211 08:16:42.805850 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:42 crc kubenswrapper[4881]: I1211 08:16:42.805867 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:42 crc kubenswrapper[4881]: I1211 08:16:42.805891 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:42 crc kubenswrapper[4881]: I1211 08:16:42.805908 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:42Z","lastTransitionTime":"2025-12-11T08:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:42 crc kubenswrapper[4881]: I1211 08:16:42.909582 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:42 crc kubenswrapper[4881]: I1211 08:16:42.909652 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:42 crc kubenswrapper[4881]: I1211 08:16:42.909671 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:42 crc kubenswrapper[4881]: I1211 08:16:42.909701 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:42 crc kubenswrapper[4881]: I1211 08:16:42.909722 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:42Z","lastTransitionTime":"2025-12-11T08:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.005714 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.006045 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.005923 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:43 crc kubenswrapper[4881]: E1211 08:16:43.006907 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:16:43 crc kubenswrapper[4881]: E1211 08:16:43.007040 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:16:43 crc kubenswrapper[4881]: E1211 08:16:43.007142 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.012154 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.012382 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.012405 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.012434 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.012453 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:43Z","lastTransitionTime":"2025-12-11T08:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.023240 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs\") pod \"network-metrics-daemon-bzslm\" (UID: \"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\") " pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:16:43 crc kubenswrapper[4881]: E1211 08:16:43.023505 4881 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 08:16:43 crc kubenswrapper[4881]: E1211 08:16:43.023593 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs podName:3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb nodeName:}" failed. No retries permitted until 2025-12-11 08:16:51.023561692 +0000 UTC m=+59.400930389 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs") pod "network-metrics-daemon-bzslm" (UID: "3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.026918 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.042485 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.063176 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.075984 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.099471 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ff0c7d416fda4200920825cedb24d309b747147b3e209166f99140da0e11d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0c9d06c04708f285b468dcbb0b0406b77e16ebe289cefb4cb0fb5d9dc84e3dc1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:31Z\\\",\\\"message\\\":\\\" (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679255 6216 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679430 6216 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1211 08:16:31.679547 6216 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679591 6216 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1211 08:16:31.679687 6216 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679942 6216 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1211 08:16:31.680464 6216 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.680515 6216 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.680865 6216 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://58ff0c7d416fda4200920825cedb24d309b747147b3e209166f99140da0e11d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"message\\\":\\\".network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z]\\\\nI1211 08:16:33.855251 6356 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"2a3fb1a3-a476-4e14-bcf5-fb79af60206a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID
\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.111669 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.115099 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.115135 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.115146 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.115164 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.115178 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:43Z","lastTransitionTime":"2025-12-11T08:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.123277 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.137694 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.152504 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.171628 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.188071 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.205294 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\
\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.217663 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.217706 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.217717 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.217737 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.217751 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:43Z","lastTransitionTime":"2025-12-11T08:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.221920 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.243804 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.260695 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.277619 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.321140 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.321194 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:43 
crc kubenswrapper[4881]: I1211 08:16:43.321213 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.321238 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.321257 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:43Z","lastTransitionTime":"2025-12-11T08:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.425036 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.425098 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.425112 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.425133 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.425148 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:43Z","lastTransitionTime":"2025-12-11T08:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.528369 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.528434 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.528454 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.528482 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.528504 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:43Z","lastTransitionTime":"2025-12-11T08:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.632087 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.632146 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.632163 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.632186 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.632203 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:43Z","lastTransitionTime":"2025-12-11T08:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.643895 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.655294 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.660833 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.676783 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.695394 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.725102 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ff0c7d416fda4200920825cedb24d309b747147b3e209166f99140da0e11d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0c9d06c04708f285b468dcbb0b0406b77e16ebe289cefb4cb0fb5d9dc84e3dc1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:31Z\\\",\\\"message\\\":\\\" (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679255 6216 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679430 6216 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1211 08:16:31.679547 6216 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679591 6216 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1211 08:16:31.679687 6216 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.679942 6216 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1211 08:16:31.680464 6216 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.680515 6216 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1211 08:16:31.680865 6216 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://58ff0c7d416fda4200920825cedb24d309b747147b3e209166f99140da0e11d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"message\\\":\\\".network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z]\\\\nI1211 08:16:33.855251 6356 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"2a3fb1a3-a476-4e14-bcf5-fb79af60206a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID
\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.736281 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.736328 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.736369 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.736389 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" 
Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.736401 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:43Z","lastTransitionTime":"2025-12-11T08:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.746903 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-clust
er-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.764077 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.777594 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.794542 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.816102 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.827288 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.839035 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.839083 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.839097 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.839122 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.839137 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:43Z","lastTransitionTime":"2025-12-11T08:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.840419 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.852319 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.867293 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.878601 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.895856 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.912979 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.941980 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.942049 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.942066 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.942092 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:43 crc kubenswrapper[4881]: I1211 08:16:43.942111 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:43Z","lastTransitionTime":"2025-12-11T08:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.004590 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:44 crc kubenswrapper[4881]: E1211 08:16:44.004767 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.044900 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.044969 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.044990 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.045021 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.045045 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:44Z","lastTransitionTime":"2025-12-11T08:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.090839 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.090887 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.090899 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.090915 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.090928 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:44Z","lastTransitionTime":"2025-12-11T08:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:44 crc kubenswrapper[4881]: E1211 08:16:44.103945 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:44Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.107630 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.107780 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.107878 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.107976 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.108067 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:44Z","lastTransitionTime":"2025-12-11T08:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:44 crc kubenswrapper[4881]: E1211 08:16:44.120361 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:44Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.125577 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.125656 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.125675 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.125701 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.125721 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:44Z","lastTransitionTime":"2025-12-11T08:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:44 crc kubenswrapper[4881]: E1211 08:16:44.144830 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:44Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.148873 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.148916 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.148927 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.148944 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.148955 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:44Z","lastTransitionTime":"2025-12-11T08:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:44 crc kubenswrapper[4881]: E1211 08:16:44.163909 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:44Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.168785 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.168824 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.168842 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.168859 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.168871 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:44Z","lastTransitionTime":"2025-12-11T08:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:44 crc kubenswrapper[4881]: E1211 08:16:44.181664 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:44Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:44 crc kubenswrapper[4881]: E1211 08:16:44.181885 4881 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.184044 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.184206 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.184279 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.184369 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.184447 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:44Z","lastTransitionTime":"2025-12-11T08:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.288046 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.288113 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.288124 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.288140 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.288151 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:44Z","lastTransitionTime":"2025-12-11T08:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.392228 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.392302 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.392321 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.392387 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.392438 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:44Z","lastTransitionTime":"2025-12-11T08:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.495368 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.495723 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.495960 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.496124 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.496266 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:44Z","lastTransitionTime":"2025-12-11T08:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.599966 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.600021 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.600039 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.600068 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.600087 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:44Z","lastTransitionTime":"2025-12-11T08:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.703298 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.703361 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.703373 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.703390 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.703401 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:44Z","lastTransitionTime":"2025-12-11T08:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.806810 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.806873 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.806889 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.806916 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.806935 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:44Z","lastTransitionTime":"2025-12-11T08:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.909663 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.909728 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.909746 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.909773 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:44 crc kubenswrapper[4881]: I1211 08:16:44.909792 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:44Z","lastTransitionTime":"2025-12-11T08:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.004623 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.004790 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:45 crc kubenswrapper[4881]: E1211 08:16:45.004869 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:16:45 crc kubenswrapper[4881]: E1211 08:16:45.004991 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.004635 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:45 crc kubenswrapper[4881]: E1211 08:16:45.005140 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.011840 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.011916 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.011941 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.011973 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.011995 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:45Z","lastTransitionTime":"2025-12-11T08:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.115055 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.115129 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.115155 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.115187 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.115214 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:45Z","lastTransitionTime":"2025-12-11T08:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.218855 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.218922 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.218939 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.218964 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.218981 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:45Z","lastTransitionTime":"2025-12-11T08:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.322841 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.323191 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.323636 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.323678 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.323704 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:45Z","lastTransitionTime":"2025-12-11T08:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.427711 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.428093 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.428271 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.428470 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.428673 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:45Z","lastTransitionTime":"2025-12-11T08:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.531784 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.532115 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.532305 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.532521 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.532736 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:45Z","lastTransitionTime":"2025-12-11T08:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.636450 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.636518 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.636540 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.636569 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.636590 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:45Z","lastTransitionTime":"2025-12-11T08:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.740219 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.740273 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.740451 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.740529 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.740561 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:45Z","lastTransitionTime":"2025-12-11T08:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.843229 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.843424 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.843455 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.843488 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.843529 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:45Z","lastTransitionTime":"2025-12-11T08:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.947146 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.947196 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.947214 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.947237 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:45 crc kubenswrapper[4881]: I1211 08:16:45.947253 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:45Z","lastTransitionTime":"2025-12-11T08:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.005132 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:46 crc kubenswrapper[4881]: E1211 08:16:46.005397 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.050272 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.050319 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.050378 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.050407 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.050423 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:46Z","lastTransitionTime":"2025-12-11T08:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.154228 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.154269 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.154279 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.154296 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.154306 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:46Z","lastTransitionTime":"2025-12-11T08:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.257998 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.258045 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.258060 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.258082 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.258097 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:46Z","lastTransitionTime":"2025-12-11T08:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.360477 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.360521 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.360532 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.360550 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.360562 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:46Z","lastTransitionTime":"2025-12-11T08:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.462818 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.462853 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.462862 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.462875 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.462914 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:46Z","lastTransitionTime":"2025-12-11T08:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.565239 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.565286 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.565301 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.565319 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.565472 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:46Z","lastTransitionTime":"2025-12-11T08:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.668004 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.668055 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.668070 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.668089 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.668101 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:46Z","lastTransitionTime":"2025-12-11T08:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.770375 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.770483 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.770508 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.770541 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.770563 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:46Z","lastTransitionTime":"2025-12-11T08:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.874151 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.874656 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.874853 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.875015 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.875223 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:46Z","lastTransitionTime":"2025-12-11T08:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.978259 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.978481 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.978513 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.978546 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:46 crc kubenswrapper[4881]: I1211 08:16:46.978569 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:46Z","lastTransitionTime":"2025-12-11T08:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.004680 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:47 crc kubenswrapper[4881]: E1211 08:16:47.005087 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.005512 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.005605 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:47 crc kubenswrapper[4881]: E1211 08:16:47.005760 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:16:47 crc kubenswrapper[4881]: E1211 08:16:47.006025 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.082232 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.082288 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.082300 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.082328 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.082367 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:47Z","lastTransitionTime":"2025-12-11T08:16:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.185615 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.185847 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.185972 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.186076 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.186170 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:47Z","lastTransitionTime":"2025-12-11T08:16:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.289821 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.290170 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.290380 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.290632 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.290863 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:47Z","lastTransitionTime":"2025-12-11T08:16:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.394746 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.394809 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.394828 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.394850 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.394868 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:47Z","lastTransitionTime":"2025-12-11T08:16:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.497456 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.497490 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.497500 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.497515 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.497524 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:47Z","lastTransitionTime":"2025-12-11T08:16:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.600757 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.600803 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.600815 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.600832 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.600843 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:47Z","lastTransitionTime":"2025-12-11T08:16:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.703008 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.703070 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.703087 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.703109 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.703127 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:47Z","lastTransitionTime":"2025-12-11T08:16:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.806872 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.806952 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.806963 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.806983 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.806996 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:47Z","lastTransitionTime":"2025-12-11T08:16:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.910150 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.910218 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.910228 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.910247 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:47 crc kubenswrapper[4881]: I1211 08:16:47.910259 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:47Z","lastTransitionTime":"2025-12-11T08:16:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.004901 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 08:16:48 crc kubenswrapper[4881]: E1211 08:16:48.005164 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.013490 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.013540 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.013550 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.013570 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.013584 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:48Z","lastTransitionTime":"2025-12-11T08:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.116509 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.116561 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.116575 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.116597 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.116610 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:48Z","lastTransitionTime":"2025-12-11T08:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.220215 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.220650 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.220834 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.220988 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.221123 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:48Z","lastTransitionTime":"2025-12-11T08:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.324033 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.324192 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.324218 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.324248 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.324268 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:48Z","lastTransitionTime":"2025-12-11T08:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.427649 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.427715 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.427733 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.427761 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.427780 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:48Z","lastTransitionTime":"2025-12-11T08:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.530795 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.530860 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.530876 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.530900 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.530917 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:48Z","lastTransitionTime":"2025-12-11T08:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.634641 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.634719 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.634741 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.634767 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.634785 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:48Z","lastTransitionTime":"2025-12-11T08:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.738613 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.738688 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.738710 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.738744 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.738767 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:48Z","lastTransitionTime":"2025-12-11T08:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.786228 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:16:48 crc kubenswrapper[4881]: E1211 08:16:48.786543 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:17:20.786502851 +0000 UTC m=+89.163871588 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.842023 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.842069 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.842085 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.842105 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.842117 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:48Z","lastTransitionTime":"2025-12-11T08:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.887478 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.887548 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.887571 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.887640 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 08:16:48 crc kubenswrapper[4881]: E1211 08:16:48.887642 4881 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Dec 11 08:16:48 crc kubenswrapper[4881]: E1211 08:16:48.887776 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:17:20.887737212 +0000 UTC m=+89.265105939 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Dec 11 08:16:48 crc kubenswrapper[4881]: E1211 08:16:48.887780 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Dec 11 08:16:48 crc kubenswrapper[4881]: E1211 08:16:48.887824 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Dec 11 08:16:48 crc kubenswrapper[4881]: E1211 08:16:48.887834 4881 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 11 08:16:48 crc kubenswrapper[4881]: E1211 08:16:48.887949 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:17:20.887920027 +0000 UTC m=+89.265288764 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 11 08:16:48 crc kubenswrapper[4881]: E1211 08:16:48.887844 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Dec 11 08:16:48 crc kubenswrapper[4881]: E1211 08:16:48.888044 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Dec 11 08:16:48 crc kubenswrapper[4881]: E1211 08:16:48.888069 4881 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 11 08:16:48 crc kubenswrapper[4881]: E1211 08:16:48.887845 4881 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 11 08:16:48 crc kubenswrapper[4881]: E1211 08:16:48.888139 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-11 08:17:20.888120071 +0000 UTC m=+89.265488798 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 11 08:16:48 crc kubenswrapper[4881]: E1211 08:16:48.888183 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-11 08:17:20.888155482 +0000 UTC m=+89.265524369 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.945817 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.945889 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.945905 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.945927 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:48 crc kubenswrapper[4881]: I1211 08:16:48.945943 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:48Z","lastTransitionTime":"2025-12-11T08:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.004591 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm"
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.004650 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:16:49 crc kubenswrapper[4881]: E1211 08:16:49.004766 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb"
Dec 11 08:16:49 crc kubenswrapper[4881]: E1211 08:16:49.004940 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.005019 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 08:16:49 crc kubenswrapper[4881]: E1211 08:16:49.005801 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.006384 4881 scope.go:117] "RemoveContainer" containerID="58ff0c7d416fda4200920825cedb24d309b747147b3e209166f99140da0e11d4"
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.048873 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.049425 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.049436 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.049455 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.049467 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:49Z","lastTransitionTime":"2025-12-11T08:16:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.358589 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.358640 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.358651 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.358672 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.358685 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:49Z","lastTransitionTime":"2025-12-11T08:16:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.362137 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z"
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.384071 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c507af2-56f1-45a6-ab18-c6d27c4e3f85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae6cead982609bcea650eccda0c847ae1c568061104bda0584984b9d62481b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdda482f14afc374a3ba38a02b0b93bab4faf300cc47f7ee39ef9183af904f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2846dae5b0d424649d68bdbeb34b7791ee2a8e1b05a0309876cd0d79c5fca01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z"
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.402448 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z"
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.417652 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z"
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.435744 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z"
Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.452846 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.462225 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.462276 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.462289 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.462308 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.462354 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:49Z","lastTransitionTime":"2025-12-11T08:16:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.469803 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.484422 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.509276 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58ff0c7d416fda4200920825cedb24d309b74714
7b3e209166f99140da0e11d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://58ff0c7d416fda4200920825cedb24d309b747147b3e209166f99140da0e11d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"message\\\":\\\".network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z]\\\\nI1211 08:16:33.855251 6356 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"2a3fb1a3-a476-4e14-bcf5-fb79af60206a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:33Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.522521 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.547942 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.566907 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.566951 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.566962 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.566981 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.566992 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:49Z","lastTransitionTime":"2025-12-11T08:16:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.568558 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.590054 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.604207 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.627749 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.645173 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.659259 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\
\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:49Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.669842 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.669895 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.669922 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.669941 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.669951 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:49Z","lastTransitionTime":"2025-12-11T08:16:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.776224 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.776280 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.776294 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.776313 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.776325 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:49Z","lastTransitionTime":"2025-12-11T08:16:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.879867 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.879919 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.879938 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.879960 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.879974 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:49Z","lastTransitionTime":"2025-12-11T08:16:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.982536 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.982604 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.982627 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.982658 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:49 crc kubenswrapper[4881]: I1211 08:16:49.982679 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:49Z","lastTransitionTime":"2025-12-11T08:16:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.004931 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:50 crc kubenswrapper[4881]: E1211 08:16:50.005090 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.084859 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.084917 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.084929 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.084950 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.084967 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:50Z","lastTransitionTime":"2025-12-11T08:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.187393 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.187433 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.187444 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.187464 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.187476 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:50Z","lastTransitionTime":"2025-12-11T08:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.291255 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.291305 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.291314 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.291351 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.291367 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:50Z","lastTransitionTime":"2025-12-11T08:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.371151 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovnkube-controller/1.log" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.373761 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerStarted","Data":"6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e"} Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.374199 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.388270 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:50Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.393734 4881 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.393792 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.393808 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.393828 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.393841 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:50Z","lastTransitionTime":"2025-12-11T08:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.401927 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:50Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.412482 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:50Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.426472 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:50Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.437815 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:50Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.449412 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:50Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.460553 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:50Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.477861 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:50Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.489558 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:50Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.496768 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.496838 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.496855 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.496881 4881 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeNotReady" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.496895 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:50Z","lastTransitionTime":"2025-12-11T08:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.504693 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c507af2-56f1-45a6-ab18-c6d27c4e3f85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae6cead982609bcea650eccda0c847ae1c568061104bda0584984b9d62481b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdda482f14afc374a3ba38a02b0b93bab4faf300cc47f7ee39ef9183af904f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2846dae5b0d424649d68bdbeb34b7791ee2a8e1b05a0309876cd0d79c5fca01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:50Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.517082 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:50Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.530268 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:50Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.547177 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:50Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.561974 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:50Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.576005 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:50Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.589705 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:50Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.600141 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.600201 4881 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.600222 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.600245 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.600261 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:50Z","lastTransitionTime":"2025-12-11T08:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.608279 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f
4dfec7b3552d3a9271bfa25e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://58ff0c7d416fda4200920825cedb24d309b747147b3e209166f99140da0e11d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"message\\\":\\\".network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z]\\\\nI1211 08:16:33.855251 6356 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"2a3fb1a3-a476-4e14-bcf5-fb79af60206a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, 
Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:33Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\
":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:50Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.703166 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.703232 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.703251 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.703278 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.703301 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:50Z","lastTransitionTime":"2025-12-11T08:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.806765 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.806826 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.806844 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.806868 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.806884 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:50Z","lastTransitionTime":"2025-12-11T08:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.909905 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.910262 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.910452 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.910587 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:50 crc kubenswrapper[4881]: I1211 08:16:50.910707 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:50Z","lastTransitionTime":"2025-12-11T08:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.005066 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:51 crc kubenswrapper[4881]: E1211 08:16:51.005271 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
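
The NodeNotReady churn above keeps repeating a single condition: the container runtime reports NetworkReady=false because no CNI configuration file exists yet in /etc/kubernetes/cni/net.d/, which is consistent with the crash-looping ovnkube-controller container (it mounts /etc/cni/net.d as host-cni-netd earlier in this log) not having written its config yet. Below is a short Go sketch of the same directory probe, assuming the conventional .conf/.conflist/.json names count as config; the real check is performed by the container runtime through the CNI library, not by code like this.

    // Illustrative sketch: the directory probe behind "no CNI configuration
    // file in /etc/kubernetes/cni/net.d/". The real readiness check is made
    // by the container runtime through the CNI library; this only mimics it.
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "strings"
    )

    func main() {
        dir := "/etc/kubernetes/cni/net.d" // directory named in the log
        entries, err := os.ReadDir(dir)
        if err != nil {
            fmt.Fprintf(os.Stderr, "cannot read %s: %v\n", dir, err)
            os.Exit(1)
        }
        var confs []string
        for _, e := range entries {
            // Conventional CNI config extensions; assumed, not exhaustive.
            switch strings.ToLower(filepath.Ext(e.Name())) {
            case ".conf", ".conflist", ".json":
                confs = append(confs, e.Name())
            }
        }
        if len(confs) == 0 {
            fmt.Printf("no CNI configuration file in %s: network not ready\n", dir)
            return
        }
        fmt.Printf("found CNI config(s): %s\n", strings.Join(confs, ", "))
    }

Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.005534 4881 util.go:30] "No sandbox for pod can be found.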
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:51 crc kubenswrapper[4881]: E1211 08:16:51.005679 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.005863 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:16:51 crc kubenswrapper[4881]: E1211 08:16:51.006133 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.014689 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.014779 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.014799 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.014867 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.014898 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:51Z","lastTransitionTime":"2025-12-11T08:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.076292 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs\") pod \"network-metrics-daemon-bzslm\" (UID: \"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\") " pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:16:51 crc kubenswrapper[4881]: E1211 08:16:51.076660 4881 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 08:16:51 crc kubenswrapper[4881]: E1211 08:16:51.076836 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs podName:3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb nodeName:}" failed. No retries permitted until 2025-12-11 08:17:07.076771866 +0000 UTC m=+75.454140593 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs") pod "network-metrics-daemon-bzslm" (UID: "3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.119030 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.119105 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.119131 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.119164 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.119187 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:51Z","lastTransitionTime":"2025-12-11T08:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.222129 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.222170 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.222182 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.222199 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.222213 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:51Z","lastTransitionTime":"2025-12-11T08:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.325954 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.326013 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.326030 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.326057 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.326074 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:51Z","lastTransitionTime":"2025-12-11T08:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.382128 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovnkube-controller/2.log" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.383327 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovnkube-controller/1.log" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.387273 4881 generic.go:334] "Generic (PLEG): container finished" podID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerID="6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e" exitCode=1 Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.387419 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerDied","Data":"6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e"} Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.387506 4881 scope.go:117] "RemoveContainer" containerID="58ff0c7d416fda4200920825cedb24d309b747147b3e209166f99140da0e11d4" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.389649 4881 scope.go:117] "RemoveContainer" containerID="6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e" Dec 11 08:16:51 crc kubenswrapper[4881]: E1211 08:16:51.390298 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b"
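
The "back-off 20s" above is the kubelet's CrashLoopBackOff delay for ovnkube-controller, which the PLEG events show exiting with code 1 and being restarted (the status patches report restartCount 2). Each consecutive failed start doubles the delay before the next attempt. A sketch of that schedule follows, assuming the commonly documented defaults of a 10-second base and a 5-minute cap; those constants are assumptions here, not values read from this cluster's configuration.

    // Illustrative sketch: the doubling delay behind "back-off 20s restarting
    // failed container". The 10s base and 5m cap are commonly documented
    // kubelet defaults, assumed here rather than read from this cluster.
    package main

    import (
        "fmt"
        "time"
    )

    // crashLoopDelay returns the back-off applied after a container has
    // failed consecutiveFailures times: 10s after the first failure,
    // doubling each time, capped at 5 minutes.
    func crashLoopDelay(consecutiveFailures int) time.Duration {
        delay := 10 * time.Second
        for i := 1; i < consecutiveFailures; i++ {
            delay *= 2
            if delay >= 5*time.Minute {
                return 5 * time.Minute
            }
        }
        return delay
    }

    func main() {
        for f := 1; f <= 7; f++ {
            fmt.Printf("failures=%d next back-off=%s\n", f, crashLoopDelay(f))
        }
    }

With those assumed constants the delays run 10s, 20s, 40s, and so on up to the cap, matching the 20s logged here early in the crash loop; the loop itself will not clear until the expired webhook certificate stops killing the container.

Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.410794 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status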
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:51Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.428690 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.428952 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.429058 4881 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.429151 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.429230 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:51Z","lastTransitionTime":"2025-12-11T08:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.432389 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:51Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.448696 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:51Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.469945 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:51Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.484918 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493f
eec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:51Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.498232 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:51Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.511299 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:51Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.530046 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:51Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.532862 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.532892 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.532902 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.532922 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.532935 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:51Z","lastTransitionTime":"2025-12-11T08:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.547782 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c507af2-56f1-45a6-ab18-c6d27c4e3f85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae6cead982609bcea650eccda0c847ae1c568061104bda0584984b9d62481b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdda482f14afc374a3ba38a02b0b93bab4faf300cc47f7ee39ef9183af904f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2846dae5b0d424649d68bdbeb34b7791ee2a8e1b05a0309876cd0d79c5fca01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:51Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.562959 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11
\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:51Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.578989 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:51Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.597152 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:51Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.613930 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:51Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.630346 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:51Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.635749 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.635835 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.635862 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.635895 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.635921 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:51Z","lastTransitionTime":"2025-12-11T08:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.649691 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:51Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.667790 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://58ff0c7d416fda4200920825cedb24d309b747147b3e209166f99140da0e11d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"message\\\":\\\".network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:33Z is after 2025-08-24T17:21:41Z]\\\\nI1211 08:16:33.855251 6356 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"2a3fb1a3-a476-4e14-bcf5-fb79af60206a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:33Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:50Z\\\",\\\"message\\\":\\\"d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e 
Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1211 08:16:50.354677 6553 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1211 08:16:50.354696 6553 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1211 08:16:50.354771 6553 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\"
:\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:51Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.686380 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:51Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.739541 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.739617 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.739636 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.739668 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.739692 4881 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:51Z","lastTransitionTime":"2025-12-11T08:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.843647 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.843709 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.843723 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.843744 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.843763 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:51Z","lastTransitionTime":"2025-12-11T08:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.946104 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.946171 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.946189 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.946216 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:51 crc kubenswrapper[4881]: I1211 08:16:51.946236 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:51Z","lastTransitionTime":"2025-12-11T08:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.005002 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:52 crc kubenswrapper[4881]: E1211 08:16:52.005177 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.049184 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.049243 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.049252 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.049268 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.049277 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:52Z","lastTransitionTime":"2025-12-11T08:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.152303 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.152387 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.152405 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.152430 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.152451 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:52Z","lastTransitionTime":"2025-12-11T08:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.255969 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.256055 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.256077 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.256106 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.256131 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:52Z","lastTransitionTime":"2025-12-11T08:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.359155 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.359228 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.359247 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.359274 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.359295 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:52Z","lastTransitionTime":"2025-12-11T08:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.400767 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovnkube-controller/2.log" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.406586 4881 scope.go:117] "RemoveContainer" containerID="6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e" Dec 11 08:16:52 crc kubenswrapper[4881]: E1211 08:16:52.406901 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.421570 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identi
ty-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:52Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.439389 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:52Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.460901 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:52Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.463187 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.463256 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.463272 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.463297 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.463314 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:52Z","lastTransitionTime":"2025-12-11T08:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.481320 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:52Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.497142 4881 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:52Z is after 2025-08-24T17:21:41Z" Dec 11 
08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.510635 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:52Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.522271 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:52Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.540565 4881 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:52Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.555985 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c507af2-56f1-45a6-ab18-c6d27c4e3f85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae6cead982609bcea650eccda0c847ae1c568061104bda0584984b9d62481b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdda482f14afc374a3ba38a02b0b93bab4faf300cc47f7ee39ef9183af904f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2846dae5b0d424649d68bdbeb34b7791ee2a8e1b05a0309876cd0d79c5fca01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:52Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.566062 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.566110 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.566125 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.566147 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.566161 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:52Z","lastTransitionTime":"2025-12-11T08:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.570945 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:52Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.585722 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:52Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.603823 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:52Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.621294 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:52Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.637213 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:52Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.653650 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:52Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.668765 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.668822 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.668837 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.668854 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.668864 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:52Z","lastTransitionTime":"2025-12-11T08:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.675572 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:50Z\\\",\\\"message\\\":\\\"d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1211 08:16:50.354677 6553 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1211 08:16:50.354696 6553 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1211 08:16:50.354771 6553 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:52Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.690659 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:52Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.771804 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.771871 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.771888 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.771912 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.771933 4881 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:52Z","lastTransitionTime":"2025-12-11T08:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.874966 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.875067 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.875091 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.875122 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.875143 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:52Z","lastTransitionTime":"2025-12-11T08:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.978893 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.978954 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.978964 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.978980 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:52 crc kubenswrapper[4881]: I1211 08:16:52.978990 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:52Z","lastTransitionTime":"2025-12-11T08:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.005262 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.005287 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.005385 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:53 crc kubenswrapper[4881]: E1211 08:16:53.005475 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:16:53 crc kubenswrapper[4881]: E1211 08:16:53.005617 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:16:53 crc kubenswrapper[4881]: E1211 08:16:53.005869 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.021391 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:53Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.044109 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9
dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:50Z\\\",\\\"message\\\":\\\"d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1211 08:16:50.354677 6553 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1211 08:16:50.354696 6553 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1211 08:16:50.354771 6553 
ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\"
:\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:53Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.057875 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:53Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.072909 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:53Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.082291 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.082369 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.082384 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.082407 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.082422 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:53Z","lastTransitionTime":"2025-12-11T08:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.087790 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:53Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.104256 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:53Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.122412 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-11T08:16:53Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.135635 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:53Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.147509 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\
\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:53Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.158971 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:53Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.171838 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:53Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.184980 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.185030 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.185045 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.185068 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.185083 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:53Z","lastTransitionTime":"2025-12-11T08:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.188536 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:53Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.205285 4881 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:53Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.219777 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:53Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.233049 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:53Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.245220 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c507af2-56f1-45a6-ab18-c6d27c4e3f85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae6cead982609bcea650eccda0c847ae1c568061104bda0584984b9d62481b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdda482f14afc374a3ba38a02b0b93bab4faf300cc47f7ee39ef9183af904f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2846dae5b0d424649d68bdbeb34b7791ee2a8e1b05a0309876cd0d79c5fca01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:53Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.258438 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:53Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.286917 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.286992 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.287014 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.287043 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.287062 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:53Z","lastTransitionTime":"2025-12-11T08:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.389970 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.390001 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.390009 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.390027 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.390037 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:53Z","lastTransitionTime":"2025-12-11T08:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.492574 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.492691 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.492713 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.492740 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.492758 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:53Z","lastTransitionTime":"2025-12-11T08:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.595924 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.596368 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.596578 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.596777 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.597480 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:53Z","lastTransitionTime":"2025-12-11T08:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.700693 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.700773 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.700792 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.700817 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.700839 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:53Z","lastTransitionTime":"2025-12-11T08:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.803964 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.804019 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.804027 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.804043 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.804053 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:53Z","lastTransitionTime":"2025-12-11T08:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.907564 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.907676 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.907724 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.907788 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:53 crc kubenswrapper[4881]: I1211 08:16:53.907838 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:53Z","lastTransitionTime":"2025-12-11T08:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.005549 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:54 crc kubenswrapper[4881]: E1211 08:16:54.005902 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.011209 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.011270 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.011288 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.011310 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.011330 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:54Z","lastTransitionTime":"2025-12-11T08:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.114506 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.114569 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.114594 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.114626 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.114651 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:54Z","lastTransitionTime":"2025-12-11T08:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.216854 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.216909 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.216926 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.216955 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.216979 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:54Z","lastTransitionTime":"2025-12-11T08:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.300590 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.300679 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.300705 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.300737 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.300762 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:54Z","lastTransitionTime":"2025-12-11T08:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:54 crc kubenswrapper[4881]: E1211 08:16:54.322907 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:54Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.328534 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.328587 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.328601 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.328621 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.328633 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:54Z","lastTransitionTime":"2025-12-11T08:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:54 crc kubenswrapper[4881]: E1211 08:16:54.343020 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:54Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.348375 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.348440 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.348450 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.348468 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.348479 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:54Z","lastTransitionTime":"2025-12-11T08:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:54 crc kubenswrapper[4881]: E1211 08:16:54.364386 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:54Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.369191 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.369296 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.369313 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.369362 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.369380 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:54Z","lastTransitionTime":"2025-12-11T08:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:54 crc kubenswrapper[4881]: E1211 08:16:54.384606 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:54Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.389431 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.389473 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.389484 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.389501 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.389512 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:54Z","lastTransitionTime":"2025-12-11T08:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:54 crc kubenswrapper[4881]: E1211 08:16:54.407299 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:16:54Z is after 2025-08-24T17:21:41Z" Dec 11 08:16:54 crc kubenswrapper[4881]: E1211 08:16:54.407505 4881 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.410046 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.410088 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.410099 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.410117 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.410128 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:54Z","lastTransitionTime":"2025-12-11T08:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.513076 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.513132 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.513149 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.513175 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.513191 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:54Z","lastTransitionTime":"2025-12-11T08:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.616717 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.616856 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.616895 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.616928 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.616951 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:54Z","lastTransitionTime":"2025-12-11T08:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.720993 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.721072 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.721095 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.721128 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.721152 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:54Z","lastTransitionTime":"2025-12-11T08:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.823861 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.823935 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.823957 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.823984 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.824002 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:54Z","lastTransitionTime":"2025-12-11T08:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.926948 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.927010 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.927028 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.927051 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:54 crc kubenswrapper[4881]: I1211 08:16:54.927069 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:54Z","lastTransitionTime":"2025-12-11T08:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.005432 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.005571 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.005672 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:16:55 crc kubenswrapper[4881]: E1211 08:16:55.005663 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:16:55 crc kubenswrapper[4881]: E1211 08:16:55.005829 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:16:55 crc kubenswrapper[4881]: E1211 08:16:55.005985 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.029420 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.029502 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.029521 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.029548 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.029568 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:55Z","lastTransitionTime":"2025-12-11T08:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.132816 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.132872 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.132919 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.132943 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.132958 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:55Z","lastTransitionTime":"2025-12-11T08:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} [...] Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.957496 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.957569 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.957597 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.957629 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:55 crc kubenswrapper[4881]: I1211 08:16:55.957653 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:55Z","lastTransitionTime":"2025-12-11T08:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.005101 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:16:56 crc kubenswrapper[4881]: E1211 08:16:56.005292 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.060945 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.060984 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.060992 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.061010 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.061020 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:56Z","lastTransitionTime":"2025-12-11T08:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.163761 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.163815 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.163828 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.163846 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.163863 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:56Z","lastTransitionTime":"2025-12-11T08:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.266833 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.266897 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.266910 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.266929 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.266941 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:56Z","lastTransitionTime":"2025-12-11T08:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.371635 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.371742 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.371767 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.371809 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.371826 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:56Z","lastTransitionTime":"2025-12-11T08:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.475033 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.475102 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.475126 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.475161 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.475180 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:56Z","lastTransitionTime":"2025-12-11T08:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.579894 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.579983 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.580008 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.580040 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.580065 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:56Z","lastTransitionTime":"2025-12-11T08:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.683850 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.683913 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.683934 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.683961 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.683978 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:56Z","lastTransitionTime":"2025-12-11T08:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.787705 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.787777 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.787798 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.787823 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:16:56 crc kubenswrapper[4881]: I1211 08:16:56.787841 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:16:56Z","lastTransitionTime":"2025-12-11T08:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Dec 11 08:16:57 crc kubenswrapper[4881]: I1211 08:16:57.005243 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 08:16:57 crc kubenswrapper[4881]: I1211 08:16:57.005380 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm"
Dec 11 08:16:57 crc kubenswrapper[4881]: E1211 08:16:57.005477 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 08:16:57 crc kubenswrapper[4881]: E1211 08:16:57.005567 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb"
Dec 11 08:16:57 crc kubenswrapper[4881]: I1211 08:16:57.005660 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:16:57 crc kubenswrapper[4881]: E1211 08:16:57.005706 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 11 08:16:58 crc kubenswrapper[4881]: I1211 08:16:58.004867 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 08:16:58 crc kubenswrapper[4881]: E1211 08:16:58.005081 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 11 08:16:59 crc kubenswrapper[4881]: I1211 08:16:59.004915 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:16:59 crc kubenswrapper[4881]: I1211 08:16:59.004954 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 08:16:59 crc kubenswrapper[4881]: I1211 08:16:59.004915 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm"
Dec 11 08:16:59 crc kubenswrapper[4881]: E1211 08:16:59.005076 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 11 08:16:59 crc kubenswrapper[4881]: E1211 08:16:59.005218 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb"
Dec 11 08:16:59 crc kubenswrapper[4881]: E1211 08:16:59.005296 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 08:17:00 crc kubenswrapper[4881]: I1211 08:17:00.004924 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 08:17:00 crc kubenswrapper[4881]: E1211 08:17:00.005275 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 11 08:17:00 crc kubenswrapper[4881]: I1211 08:17:00.917805 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:00 crc kubenswrapper[4881]: I1211 08:17:00.917843 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:00 crc kubenswrapper[4881]: I1211 08:17:00.917854 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:00 crc kubenswrapper[4881]: I1211 08:17:00.917868 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:00 crc kubenswrapper[4881]: I1211 08:17:00.917878 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:00Z","lastTransitionTime":"2025-12-11T08:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.004801 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 08:17:01 crc kubenswrapper[4881]: E1211 08:17:01.004950 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.005171 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:17:01 crc kubenswrapper[4881]: E1211 08:17:01.005234 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.005408 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm"
Dec 11 08:17:01 crc kubenswrapper[4881]: E1211 08:17:01.005497 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.019630 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.019672 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.019684 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.019699 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.019714 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:01Z","lastTransitionTime":"2025-12-11T08:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.123001 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.123049 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.123067 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.123093 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.123128 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:01Z","lastTransitionTime":"2025-12-11T08:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.225818 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.225854 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.225863 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.225882 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.225892 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:01Z","lastTransitionTime":"2025-12-11T08:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.328137 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.328189 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.328201 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.328220 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.328232 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:01Z","lastTransitionTime":"2025-12-11T08:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.430465 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.430508 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.430517 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.430534 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.430546 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:01Z","lastTransitionTime":"2025-12-11T08:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.533841 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.533883 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.533892 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.533908 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.533916 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:01Z","lastTransitionTime":"2025-12-11T08:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.637055 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.637126 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.637140 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.637161 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.637173 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:01Z","lastTransitionTime":"2025-12-11T08:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.740125 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.740169 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.740180 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.740198 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.740208 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:01Z","lastTransitionTime":"2025-12-11T08:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.842129 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.842164 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.842177 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.842193 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.842204 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:01Z","lastTransitionTime":"2025-12-11T08:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.944791 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.944826 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.944836 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.944855 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:01 crc kubenswrapper[4881]: I1211 08:17:01.944868 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:01Z","lastTransitionTime":"2025-12-11T08:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.004698 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:02 crc kubenswrapper[4881]: E1211 08:17:02.004914 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.047989 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.048033 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.048042 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.048060 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.048071 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:02Z","lastTransitionTime":"2025-12-11T08:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.151070 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.151103 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.151111 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.151125 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.151133 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:02Z","lastTransitionTime":"2025-12-11T08:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.253415 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.253474 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.253485 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.253504 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.253516 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:02Z","lastTransitionTime":"2025-12-11T08:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.356264 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.356533 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.356639 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.356708 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.356765 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:02Z","lastTransitionTime":"2025-12-11T08:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.459429 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.459475 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.459489 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.459507 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.459518 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:02Z","lastTransitionTime":"2025-12-11T08:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.562144 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.562194 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.562203 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.562218 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.562229 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:02Z","lastTransitionTime":"2025-12-11T08:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.665585 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.665640 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.665650 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.665668 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.665678 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:02Z","lastTransitionTime":"2025-12-11T08:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.769268 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.769356 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.769368 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.769393 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.769408 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:02Z","lastTransitionTime":"2025-12-11T08:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.871973 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.872047 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.872058 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.872080 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.872095 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:02Z","lastTransitionTime":"2025-12-11T08:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.975697 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.975763 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.975779 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.975805 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:02 crc kubenswrapper[4881]: I1211 08:17:02.975825 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:02Z","lastTransitionTime":"2025-12-11T08:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.005186 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.005318 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.005461 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:03 crc kubenswrapper[4881]: E1211 08:17:03.005375 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:03 crc kubenswrapper[4881]: E1211 08:17:03.005608 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:03 crc kubenswrapper[4881]: E1211 08:17:03.005808 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.023118 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:1
5:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:03Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.037281 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c507af2-56f1-45a6-ab18-c6d27c4e3f85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae6cead982609bcea650eccda0c847ae1c568061104bda0584984b9d62481b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdda482f14afc374a3ba38a02b0b93bab4faf300cc47f7ee39ef9183af904f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2846dae5b0d424649d68bdbeb34b7791ee2a8e1b05a0309876cd0d79c5fca01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:03Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.048841 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:03Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.061585 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:03Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.079257 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.079297 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.079307 4881 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.079325 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.079353 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:03Z","lastTransitionTime":"2025-12-11T08:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.087053 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f
4dfec7b3552d3a9271bfa25e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:50Z\\\",\\\"message\\\":\\\"d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1211 08:16:50.354677 6553 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1211 08:16:50.354696 6553 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1211 08:16:50.354771 6553 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:03Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.104170 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:03Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.122882 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:03Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.137071 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:03Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.148440 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:03Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.161082 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:03Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.171018 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:03Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.181083 4881 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:03Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.181505 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.181547 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.181560 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.181578 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.181589 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:03Z","lastTransitionTime":"2025-12-11T08:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.190948 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:03Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.204927 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:03Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.216231 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:03Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.227850 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:03Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.238632 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:03Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.283905 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.283960 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.283970 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.283985 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.283994 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:03Z","lastTransitionTime":"2025-12-11T08:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.387038 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.387091 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.387103 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.387121 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.387135 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:03Z","lastTransitionTime":"2025-12-11T08:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.489305 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.489370 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.489379 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.489394 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.489404 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:03Z","lastTransitionTime":"2025-12-11T08:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.592034 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.592102 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.592114 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.592135 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.592149 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:03Z","lastTransitionTime":"2025-12-11T08:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.695055 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.695097 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.695107 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.695126 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.695140 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:03Z","lastTransitionTime":"2025-12-11T08:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.798268 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.798477 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.798499 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.798528 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.798547 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:03Z","lastTransitionTime":"2025-12-11T08:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.900686 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.900735 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.900748 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.900769 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:03 crc kubenswrapper[4881]: I1211 08:17:03.900784 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:03Z","lastTransitionTime":"2025-12-11T08:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.004177 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.004236 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.004248 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.004265 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.004275 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:04Z","lastTransitionTime":"2025-12-11T08:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.004326 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:04 crc kubenswrapper[4881]: E1211 08:17:04.004502 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.108320 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.108393 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.108405 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.108441 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.108458 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:04Z","lastTransitionTime":"2025-12-11T08:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.212808 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.212866 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.212876 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.212895 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.212907 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:04Z","lastTransitionTime":"2025-12-11T08:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.315873 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.315921 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.315932 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.315950 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.315967 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:04Z","lastTransitionTime":"2025-12-11T08:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.419424 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.419478 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.419491 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.419512 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.419527 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:04Z","lastTransitionTime":"2025-12-11T08:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.522290 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.522357 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.522367 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.522389 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.522408 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:04Z","lastTransitionTime":"2025-12-11T08:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.625286 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.625354 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.625368 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.625386 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.625397 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:04Z","lastTransitionTime":"2025-12-11T08:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.692179 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.692230 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.692241 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.692260 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.692272 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:04Z","lastTransitionTime":"2025-12-11T08:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:04 crc kubenswrapper[4881]: E1211 08:17:04.706855 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:04Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.712708 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.712768 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.712786 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.712812 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.712834 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:04Z","lastTransitionTime":"2025-12-11T08:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:04 crc kubenswrapper[4881]: E1211 08:17:04.730576 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:04Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.735601 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.735637 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.735649 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.735666 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.735678 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:04Z","lastTransitionTime":"2025-12-11T08:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:04 crc kubenswrapper[4881]: E1211 08:17:04.756841 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:04Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.761648 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.761783 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.761862 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.761955 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.762072 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:04Z","lastTransitionTime":"2025-12-11T08:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:04 crc kubenswrapper[4881]: E1211 08:17:04.775162 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:04Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.780027 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.780181 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.780257 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.780352 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.780432 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:04Z","lastTransitionTime":"2025-12-11T08:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:04 crc kubenswrapper[4881]: E1211 08:17:04.794045 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:04Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:04 crc kubenswrapper[4881]: E1211 08:17:04.794319 4881 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.796188 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.796266 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.796288 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.796318 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.796398 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:04Z","lastTransitionTime":"2025-12-11T08:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.899069 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.899133 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.899151 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.899178 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:04 crc kubenswrapper[4881]: I1211 08:17:04.899195 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:04Z","lastTransitionTime":"2025-12-11T08:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.001907 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.002189 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.002358 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.002468 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.002565 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:05Z","lastTransitionTime":"2025-12-11T08:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.005149 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.005241 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:05 crc kubenswrapper[4881]: E1211 08:17:05.005356 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.005537 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:05 crc kubenswrapper[4881]: E1211 08:17:05.005760 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:05 crc kubenswrapper[4881]: E1211 08:17:05.005639 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.006516 4881 scope.go:117] "RemoveContainer" containerID="6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e" Dec 11 08:17:05 crc kubenswrapper[4881]: E1211 08:17:05.006696 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.105260 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.105291 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.105299 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.105312 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.105323 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:05Z","lastTransitionTime":"2025-12-11T08:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.217315 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.217842 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.218032 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.218203 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.218433 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:05Z","lastTransitionTime":"2025-12-11T08:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.320833 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.321030 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.321181 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.321319 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.321477 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:05Z","lastTransitionTime":"2025-12-11T08:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.423058 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.423095 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.423105 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.423122 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.423133 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:05Z","lastTransitionTime":"2025-12-11T08:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.525283 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.525364 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.525378 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.525394 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.525406 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:05Z","lastTransitionTime":"2025-12-11T08:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.628456 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.628554 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.628581 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.628620 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.628643 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:05Z","lastTransitionTime":"2025-12-11T08:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.731398 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.731476 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.731492 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.731514 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.731531 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:05Z","lastTransitionTime":"2025-12-11T08:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.834585 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.834651 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.834669 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.834692 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.834709 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:05Z","lastTransitionTime":"2025-12-11T08:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.936964 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.937208 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.937291 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.937406 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:05 crc kubenswrapper[4881]: I1211 08:17:05.937500 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:05Z","lastTransitionTime":"2025-12-11T08:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.004866 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:06 crc kubenswrapper[4881]: E1211 08:17:06.005055 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.039876 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.039937 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.039947 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.039972 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.039989 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:06Z","lastTransitionTime":"2025-12-11T08:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.142932 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.142978 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.142989 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.143010 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.143024 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:06Z","lastTransitionTime":"2025-12-11T08:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.245418 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.245461 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.245472 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.245488 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.245499 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:06Z","lastTransitionTime":"2025-12-11T08:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.347690 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.347727 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.347735 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.347749 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.347760 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:06Z","lastTransitionTime":"2025-12-11T08:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.449797 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.449861 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.449874 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.449889 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.449988 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:06Z","lastTransitionTime":"2025-12-11T08:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.552405 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.552458 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.552468 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.552483 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.552492 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:06Z","lastTransitionTime":"2025-12-11T08:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.654983 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.655047 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.655063 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.655091 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.655110 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:06Z","lastTransitionTime":"2025-12-11T08:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.757982 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.758041 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.758051 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.758067 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.758076 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:06Z","lastTransitionTime":"2025-12-11T08:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.860649 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.860726 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.860752 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.860787 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.860812 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:06Z","lastTransitionTime":"2025-12-11T08:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.964228 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.964285 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.964294 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.964312 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:06 crc kubenswrapper[4881]: I1211 08:17:06.964325 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:06Z","lastTransitionTime":"2025-12-11T08:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.004860 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.004944 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm"
Dec 11 08:17:07 crc kubenswrapper[4881]: E1211 08:17:07.005034 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.005050 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:17:07 crc kubenswrapper[4881]: E1211 08:17:07.005145 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb"
Dec 11 08:17:07 crc kubenswrapper[4881]: E1211 08:17:07.005330 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.067852 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.067899 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.067914 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.067936 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.067949 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:07Z","lastTransitionTime":"2025-12-11T08:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.156532 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs\") pod \"network-metrics-daemon-bzslm\" (UID: \"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\") " pod="openshift-multus/network-metrics-daemon-bzslm"
Dec 11 08:17:07 crc kubenswrapper[4881]: E1211 08:17:07.156772 4881 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Dec 11 08:17:07 crc kubenswrapper[4881]: E1211 08:17:07.156897 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs podName:3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb nodeName:}" failed. No retries permitted until 2025-12-11 08:17:39.156864499 +0000 UTC m=+107.534233396 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs") pod "network-metrics-daemon-bzslm" (UID: "3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb") : object "openshift-multus"/"metrics-daemon-secret" not registered
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.171090 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.171147 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.171156 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.171174 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.171187 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:07Z","lastTransitionTime":"2025-12-11T08:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.273746 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.273792 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.273805 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.273830 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.273843 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:07Z","lastTransitionTime":"2025-12-11T08:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.375842 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.375886 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.375900 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.375917 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.375928 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:07Z","lastTransitionTime":"2025-12-11T08:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.478401 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.478584 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.478604 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.478630 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.478648 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:07Z","lastTransitionTime":"2025-12-11T08:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.582395 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.582462 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.582481 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.582506 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.582523 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:07Z","lastTransitionTime":"2025-12-11T08:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.686501 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.686564 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.686582 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.686609 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.686629 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:07Z","lastTransitionTime":"2025-12-11T08:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.789202 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.789280 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.789304 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.789378 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.789405 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:07Z","lastTransitionTime":"2025-12-11T08:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.892731 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.892805 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.892830 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.892859 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:07 crc kubenswrapper[4881]: I1211 08:17:07.893290 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:07Z","lastTransitionTime":"2025-12-11T08:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.001395 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.001459 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.001478 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.001569 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.001624 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:08Z","lastTransitionTime":"2025-12-11T08:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.004686 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 08:17:08 crc kubenswrapper[4881]: E1211 08:17:08.004870 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.106417 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.106807 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.107010 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.107163 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.107398 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:08Z","lastTransitionTime":"2025-12-11T08:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.210585 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.210646 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.210665 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.210688 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.210703 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:08Z","lastTransitionTime":"2025-12-11T08:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.314385 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.314445 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.314462 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.314485 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.314502 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:08Z","lastTransitionTime":"2025-12-11T08:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.416855 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.416913 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.416935 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.416995 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.417012 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:08Z","lastTransitionTime":"2025-12-11T08:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.521312 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.521431 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.521485 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.521517 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.521582 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:08Z","lastTransitionTime":"2025-12-11T08:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.626469 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.626534 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.626552 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.626680 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.626700 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:08Z","lastTransitionTime":"2025-12-11T08:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.729987 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.730025 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.730036 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.730058 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.730071 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:08Z","lastTransitionTime":"2025-12-11T08:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.833867 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.833933 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.833946 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.833969 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.833983 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:08Z","lastTransitionTime":"2025-12-11T08:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.937157 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.937227 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.937252 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.937282 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:08 crc kubenswrapper[4881]: I1211 08:17:08.937308 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:08Z","lastTransitionTime":"2025-12-11T08:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.005483 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.005484 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:17:09 crc kubenswrapper[4881]: E1211 08:17:09.005706 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.005484 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm"
Dec 11 08:17:09 crc kubenswrapper[4881]: E1211 08:17:09.005868 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 11 08:17:09 crc kubenswrapper[4881]: E1211 08:17:09.006444 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.023770 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.040451 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.040549 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.040575 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.040609 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.040635 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:09Z","lastTransitionTime":"2025-12-11T08:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.144139 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.144186 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.144199 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.144220 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.144236 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:09Z","lastTransitionTime":"2025-12-11T08:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.247496 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.247574 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.247615 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.247648 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.247666 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:09Z","lastTransitionTime":"2025-12-11T08:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.351539 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.351600 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.351619 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.351647 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.351664 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:09Z","lastTransitionTime":"2025-12-11T08:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.454742 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.454805 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.454821 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.454849 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.454864 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:09Z","lastTransitionTime":"2025-12-11T08:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.461993 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g8jhd_368e635e-0e63-4202-b9e4-4a3a85c6f30c/kube-multus/0.log"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.462099 4881 generic.go:334] "Generic (PLEG): container finished" podID="368e635e-0e63-4202-b9e4-4a3a85c6f30c" containerID="f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f" exitCode=1
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.462216 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-g8jhd" event={"ID":"368e635e-0e63-4202-b9e4-4a3a85c6f30c","Type":"ContainerDied","Data":"f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f"}
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.462987 4881 scope.go:117] "RemoveContainer" containerID="f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.481494 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.507924 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.521568 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.536463 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.549743 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:17:08Z\\\",\\\"message\\\":\\\"2025-12-11T08:16:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_76e76f4f-e849-4b50-a2b6-debceb43335b\\\\n2025-12-11T08:16:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_76e76f4f-e849-4b50-a2b6-debceb43335b to /host/opt/cni/bin/\\\\n2025-12-11T08:16:23Z [verbose] multus-daemon started\\\\n2025-12-11T08:16:23Z [verbose] Readiness Indicator file check\\\\n2025-12-11T08:17:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.558025 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.558073 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.558086 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.558105 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.558120 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:09Z","lastTransitionTime":"2025-12-11T08:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.569432 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.580618 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z"
Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.593437 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.608369 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b827
99488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.622280 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c507af2-56f1-45a6-ab18-c6d27c4e3f85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae6cead982609bcea650eccda0c847ae1c568061104bda0584984b9d62481b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdda482f14afc374a3ba38a02b0b93bab4faf300cc47f7ee39ef9183af904f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2846dae5b0d424649d68bdbeb34b7791ee2a8e1b05a0309876cd0d79c5fca01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.635858 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.646313 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.654993 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c3d898f-552a-471b-affd-a0489efc782e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9251be56904cea5a0c24898fa10c30718080f3fe77804166d664b3f40235c80a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.660414 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.660456 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.660467 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.660486 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.660496 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:09Z","lastTransitionTime":"2025-12-11T08:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.668967 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.685138 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.699216 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.712688 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.731695 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:50Z\\\",\\\"message\\\":\\\"d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1211 08:16:50.354677 6553 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1211 08:16:50.354696 6553 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1211 08:16:50.354771 6553 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:09Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.762760 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.762811 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.762821 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.762837 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.762847 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:09Z","lastTransitionTime":"2025-12-11T08:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.865556 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.865598 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.865608 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.865626 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.865637 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:09Z","lastTransitionTime":"2025-12-11T08:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.968868 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.968918 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.968934 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.968956 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:09 crc kubenswrapper[4881]: I1211 08:17:09.968974 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:09Z","lastTransitionTime":"2025-12-11T08:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.004496 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:10 crc kubenswrapper[4881]: E1211 08:17:10.004744 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.073008 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.073067 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.073086 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.073112 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.073129 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:10Z","lastTransitionTime":"2025-12-11T08:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.177139 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.177226 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.177243 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.177269 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.177286 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:10Z","lastTransitionTime":"2025-12-11T08:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.280635 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.280683 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.280696 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.280715 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.280727 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:10Z","lastTransitionTime":"2025-12-11T08:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.383869 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.383925 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.383946 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.383974 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.383995 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:10Z","lastTransitionTime":"2025-12-11T08:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.467872 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g8jhd_368e635e-0e63-4202-b9e4-4a3a85c6f30c/kube-multus/0.log" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.467929 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-g8jhd" event={"ID":"368e635e-0e63-4202-b9e4-4a3a85c6f30c","Type":"ContainerStarted","Data":"472f02e542c67bbd11145db9b59f2bae1dc688d45e95099b17a33fa1e27dbac8"} Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.488108 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.488159 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.488171 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.488193 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.488208 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:10Z","lastTransitionTime":"2025-12-11T08:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.489306 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.508233 4881 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.524631 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.541570 4881 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.560700 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.574568 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.590774 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.590824 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.590837 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.590857 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.590870 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:10Z","lastTransitionTime":"2025-12-11T08:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.592937 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://472f02e542c67bbd11145db9b59f2bae1dc688d45e95099b17a33fa1e27dbac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:17:08Z\\\",\\\"message\\\":\\\"2025-12-11T08:16:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_76e76f4f-e849-4b50-a2b6-debceb43335b\\\\n2025-12-11T08:16:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_76e76f4f-e849-4b50-a2b6-debceb43335b to /host/opt/cni/bin/\\\\n2025-12-11T08:16:23Z [verbose] multus-daemon started\\\\n2025-12-11T08:16:23Z [verbose] Readiness Indicator file check\\\\n2025-12-11T08:17:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:17:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.607966 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.620199 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.638260 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c507af2-56f1-45a6-ab18-c6d27c4e3f85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae6cead982609bcea650eccda0c847ae1c568061104bda0584984b9d62481b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdda482f14afc374a3ba38a02b0b93bab4faf300cc47f7ee39ef9183af904f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2846dae5b0d424649d68bdbeb34b7791ee2a8e1b05a0309876cd0d79c5fca01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.651863 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.663670 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.679027 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c3d898f-552a-471b-affd-a0489efc782e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9251be56904cea5a0c24898fa10c30718080f3fe77804166d664b3f40235c80a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.694090 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.694157 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.694169 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.694193 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.694209 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:10Z","lastTransitionTime":"2025-12-11T08:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.705812 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.729400 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.746939 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.763927 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.790991 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:50Z\\\",\\\"message\\\":\\\"d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1211 08:16:50.354677 6553 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1211 08:16:50.354696 6553 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1211 08:16:50.354771 6553 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:10Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.796562 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.796607 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.796619 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.796638 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.796652 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:10Z","lastTransitionTime":"2025-12-11T08:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.899313 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.899378 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.899390 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.899407 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:10 crc kubenswrapper[4881]: I1211 08:17:10.899417 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:10Z","lastTransitionTime":"2025-12-11T08:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.001670 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.001721 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.001745 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.001777 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.001799 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:11Z","lastTransitionTime":"2025-12-11T08:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.006273 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:11 crc kubenswrapper[4881]: E1211 08:17:11.006461 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.006772 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:11 crc kubenswrapper[4881]: E1211 08:17:11.007099 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.007234 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:11 crc kubenswrapper[4881]: E1211 08:17:11.007509 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.105370 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.105433 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.105455 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.105480 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.105498 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:11Z","lastTransitionTime":"2025-12-11T08:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.217413 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.217482 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.217505 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.217532 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.217546 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:11Z","lastTransitionTime":"2025-12-11T08:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.321915 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.321995 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.322017 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.322046 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.322063 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:11Z","lastTransitionTime":"2025-12-11T08:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.425429 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.425488 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.425507 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.425535 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.425554 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:11Z","lastTransitionTime":"2025-12-11T08:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.529055 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.529175 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.529193 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.529254 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.529274 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:11Z","lastTransitionTime":"2025-12-11T08:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.633177 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.633233 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.633244 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.633259 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.633269 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:11Z","lastTransitionTime":"2025-12-11T08:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.736790 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.736871 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.736893 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.736926 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.736946 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:11Z","lastTransitionTime":"2025-12-11T08:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.840245 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.840284 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.840293 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.840310 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.840322 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:11Z","lastTransitionTime":"2025-12-11T08:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.943200 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.943236 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.943245 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.943262 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:11 crc kubenswrapper[4881]: I1211 08:17:11.943273 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:11Z","lastTransitionTime":"2025-12-11T08:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.004634 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:12 crc kubenswrapper[4881]: E1211 08:17:12.004839 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.046128 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.046192 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.046205 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.046225 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.046241 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:12Z","lastTransitionTime":"2025-12-11T08:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.149219 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.149253 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.149263 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.149276 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.149284 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:12Z","lastTransitionTime":"2025-12-11T08:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.251644 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.251682 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.251691 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.251704 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.251714 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:12Z","lastTransitionTime":"2025-12-11T08:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.355644 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.355727 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.355749 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.355773 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.355790 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:12Z","lastTransitionTime":"2025-12-11T08:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.459726 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.459774 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.459783 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.459801 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.459813 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:12Z","lastTransitionTime":"2025-12-11T08:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.563144 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.563212 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.563230 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.563293 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.563308 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:12Z","lastTransitionTime":"2025-12-11T08:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.668238 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.668388 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.668434 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.668508 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.668560 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:12Z","lastTransitionTime":"2025-12-11T08:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.772747 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.772819 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.772835 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.772862 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.772886 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:12Z","lastTransitionTime":"2025-12-11T08:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.876391 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.876507 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.876534 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.876566 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.876588 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:12Z","lastTransitionTime":"2025-12-11T08:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.980845 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.980949 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.981024 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.981052 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:12 crc kubenswrapper[4881]: I1211 08:17:12.981072 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:12Z","lastTransitionTime":"2025-12-11T08:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.005120 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.005212 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:13 crc kubenswrapper[4881]: E1211 08:17:13.005297 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:13 crc kubenswrapper[4881]: E1211 08:17:13.005486 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.005556 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:13 crc kubenswrapper[4881]: E1211 08:17:13.005620 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.023301 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\
\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.040970 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c507af2-56f1-45a6-ab18-c6d27c4e3f85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae6cead982609bcea650eccda0c847ae1c568061104bda0584984b9d62481b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdda482f14afc374a3ba38a02b0b93bab4faf300cc47f7ee39ef9183af904f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2846dae5b0d424649d68bdbeb34b7791ee2a8e1b05a0309876cd0d79c5fca01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.053603 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.067731 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.079434 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c3d898f-552a-471b-affd-a0489efc782e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9251be56904cea5a0c24898fa10c30718080f3fe77804166d664b3f40235c80a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.083430 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.083505 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.083532 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.083564 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.083588 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:13Z","lastTransitionTime":"2025-12-11T08:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.099680 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.121693 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.140554 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.157875 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.186824 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.186890 4881 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.186908 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.186934 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.186951 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:13Z","lastTransitionTime":"2025-12-11T08:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.190556 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f
4dfec7b3552d3a9271bfa25e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:50Z\\\",\\\"message\\\":\\\"d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1211 08:16:50.354677 6553 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1211 08:16:50.354696 6553 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1211 08:16:50.354771 6553 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.207918 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.226205 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.249355 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.270329 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.290387 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.290428 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.290441 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.290462 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.290476 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:13Z","lastTransitionTime":"2025-12-11T08:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.291291 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://472f02e542c67bbd11145db9b59f2bae1dc688d45e95099b17a33fa1e27dbac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:17:08Z\\\",\\\"message\\\":\\\"2025-12-11T08:16:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_76e76f4f-e849-4b50-a2b6-debceb43335b\\\\n2025-12-11T08:16:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_76e76f4f-e849-4b50-a2b6-debceb43335b to /host/opt/cni/bin/\\\\n2025-12-11T08:16:23Z [verbose] multus-daemon started\\\\n2025-12-11T08:16:23Z [verbose] Readiness Indicator file check\\\\n2025-12-11T08:17:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:17:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.309122 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.321012 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.335477 4881 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:13Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.392233 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.392281 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.392294 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.392318 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.392352 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:13Z","lastTransitionTime":"2025-12-11T08:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.495446 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.495511 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.495523 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.495544 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.495555 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:13Z","lastTransitionTime":"2025-12-11T08:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.598220 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.598304 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.598327 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.598425 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.598448 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:13Z","lastTransitionTime":"2025-12-11T08:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.701957 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.702027 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.702050 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.702075 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.702095 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:13Z","lastTransitionTime":"2025-12-11T08:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.806239 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.806314 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.806369 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.806404 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.806426 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:13Z","lastTransitionTime":"2025-12-11T08:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.909580 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.909629 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.909647 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.909670 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:13 crc kubenswrapper[4881]: I1211 08:17:13.909688 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:13Z","lastTransitionTime":"2025-12-11T08:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.005035 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:14 crc kubenswrapper[4881]: E1211 08:17:14.005193 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.012803 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.012855 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.012873 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.012894 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.012910 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:14Z","lastTransitionTime":"2025-12-11T08:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.116544 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.116722 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.116754 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.116784 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.116805 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:14Z","lastTransitionTime":"2025-12-11T08:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.220658 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.220724 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.220735 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.220755 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.220768 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:14Z","lastTransitionTime":"2025-12-11T08:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.324597 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.324732 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.324746 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.324767 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.324779 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:14Z","lastTransitionTime":"2025-12-11T08:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.428655 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.428731 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.428756 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.428786 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.428811 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:14Z","lastTransitionTime":"2025-12-11T08:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.532491 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.532551 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.532574 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.532599 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.532620 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:14Z","lastTransitionTime":"2025-12-11T08:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.636042 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.636114 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.636133 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.636159 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.636176 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:14Z","lastTransitionTime":"2025-12-11T08:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.739538 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.739655 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.739674 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.739698 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.739716 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:14Z","lastTransitionTime":"2025-12-11T08:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.814415 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.814477 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.814516 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.814547 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.814569 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:14Z","lastTransitionTime":"2025-12-11T08:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:14 crc kubenswrapper[4881]: E1211 08:17:14.836786 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:14Z is after 
2025-08-24T17:21:41Z"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.842190 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.842248 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.842272 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.842302 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.842327 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:14Z","lastTransitionTime":"2025-12-11T08:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:14 crc kubenswrapper[4881]: E1211 08:17:14.868982 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:14Z is after 
2025-08-24T17:21:41Z"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.874572 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.874648 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.874667 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.874696 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.874717 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:14Z","lastTransitionTime":"2025-12-11T08:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:14 crc kubenswrapper[4881]: E1211 08:17:14.899503 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:14Z is after 
2025-08-24T17:21:41Z"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.907966 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.908015 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.908029 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.908053 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.908074 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:14Z","lastTransitionTime":"2025-12-11T08:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:14 crc kubenswrapper[4881]: E1211 08:17:14.923976 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:14Z is after 
2025-08-24T17:21:41Z"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.927542 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.927681 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.927704 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.927732 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.927752 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:14Z","lastTransitionTime":"2025-12-11T08:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:14 crc kubenswrapper[4881]: E1211 08:17:14.944592 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:14Z is after 
2025-08-24T17:21:41Z"
Dec 11 08:17:14 crc kubenswrapper[4881]: E1211 08:17:14.944843 4881 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.947948 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.948029 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.948048 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.948074 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:14 crc kubenswrapper[4881]: I1211 08:17:14.948094 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:14Z","lastTransitionTime":"2025-12-11T08:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.004672 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.004779 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.004693 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm"
Dec 11 08:17:15 crc kubenswrapper[4881]: E1211 08:17:15.004924 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 11 08:17:15 crc kubenswrapper[4881]: E1211 08:17:15.005147 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb"
Dec 11 08:17:15 crc kubenswrapper[4881]: E1211 08:17:15.005245 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.051586 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.051646 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.051658 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.051679 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.051692 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:15Z","lastTransitionTime":"2025-12-11T08:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.158498 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.158605 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.158619 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.158746 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.158762 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:15Z","lastTransitionTime":"2025-12-11T08:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.262831 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.262930 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.262953 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.262979 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.262999 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:15Z","lastTransitionTime":"2025-12-11T08:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.366119 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.366200 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.366229 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.366248 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.366261 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:15Z","lastTransitionTime":"2025-12-11T08:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.470022 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.470102 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.470127 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.470171 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.470189 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:15Z","lastTransitionTime":"2025-12-11T08:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.573199 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.573264 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.573287 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.573316 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.573364 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:15Z","lastTransitionTime":"2025-12-11T08:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.676211 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.676294 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.676316 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.676379 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.676441 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:15Z","lastTransitionTime":"2025-12-11T08:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.778996 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.779054 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.779066 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.779083 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.779108 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:15Z","lastTransitionTime":"2025-12-11T08:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.882636 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.882703 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.882727 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.882757 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.882779 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:15Z","lastTransitionTime":"2025-12-11T08:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.986609 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.986689 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.986709 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.986740 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:15 crc kubenswrapper[4881]: I1211 08:17:15.986761 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:15Z","lastTransitionTime":"2025-12-11T08:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.005277 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 08:17:16 crc kubenswrapper[4881]: E1211 08:17:16.005542 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.088942 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.088984 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.088995 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.089034 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.089048 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:16Z","lastTransitionTime":"2025-12-11T08:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.191689 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.191738 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.191754 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.191773 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.191790 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:16Z","lastTransitionTime":"2025-12-11T08:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.295553 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.295608 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.295630 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.295659 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.295682 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:16Z","lastTransitionTime":"2025-12-11T08:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.397705 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.397801 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.397819 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.397843 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.397864 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:16Z","lastTransitionTime":"2025-12-11T08:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.500919 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.500989 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.501002 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.501015 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.501024 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:16Z","lastTransitionTime":"2025-12-11T08:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.604202 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.604259 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.604270 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.604290 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.604301 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:16Z","lastTransitionTime":"2025-12-11T08:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.707941 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.708042 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.708070 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.708112 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.708155 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:16Z","lastTransitionTime":"2025-12-11T08:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.811049 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.811096 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.811112 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.811127 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.811140 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:16Z","lastTransitionTime":"2025-12-11T08:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.914225 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.914288 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.914311 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.914375 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:16 crc kubenswrapper[4881]: I1211 08:17:16.914425 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:16Z","lastTransitionTime":"2025-12-11T08:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.005307 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.005321 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:17:17 crc kubenswrapper[4881]: E1211 08:17:17.005482 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.006405 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 08:17:17 crc kubenswrapper[4881]: E1211 08:17:17.007386 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 11 08:17:17 crc kubenswrapper[4881]: E1211 08:17:17.007793 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.018003 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.018061 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.018078 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.018099 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.018117 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:17Z","lastTransitionTime":"2025-12-11T08:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.120629 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.120759 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.120779 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.120802 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.120817 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:17Z","lastTransitionTime":"2025-12-11T08:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.223559 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.223611 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.223624 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.223643 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.223657 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:17Z","lastTransitionTime":"2025-12-11T08:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.326296 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.326369 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.326385 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.326403 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.326416 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:17Z","lastTransitionTime":"2025-12-11T08:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.429845 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.429897 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.429909 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.429927 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.429939 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:17Z","lastTransitionTime":"2025-12-11T08:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.532987 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.533019 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.533027 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.533040 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.533049 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:17Z","lastTransitionTime":"2025-12-11T08:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.636757 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.636842 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.636870 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.636907 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.636934 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:17Z","lastTransitionTime":"2025-12-11T08:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.739681 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.739742 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.739765 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.739795 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.739818 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:17Z","lastTransitionTime":"2025-12-11T08:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.843513 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.843584 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.843601 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.843625 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.843648 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:17Z","lastTransitionTime":"2025-12-11T08:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.946201 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.946262 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.946272 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.946290 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:17 crc kubenswrapper[4881]: I1211 08:17:17.946302 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:17Z","lastTransitionTime":"2025-12-11T08:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.005187 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 08:17:18 crc kubenswrapper[4881]: E1211 08:17:18.005393 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.049059 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.049105 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.049118 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.049134 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.049144 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:18Z","lastTransitionTime":"2025-12-11T08:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.152376 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.152610 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.152620 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.152636 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.152650 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:18Z","lastTransitionTime":"2025-12-11T08:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.256159 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.256240 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.256261 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.256292 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.256313 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:18Z","lastTransitionTime":"2025-12-11T08:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.359749 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.359799 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.359808 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.359825 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.359837 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:18Z","lastTransitionTime":"2025-12-11T08:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.462570 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.462641 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.462659 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.462692 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.462716 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:18Z","lastTransitionTime":"2025-12-11T08:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.565798 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.565955 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.565969 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.565986 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.565996 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:18Z","lastTransitionTime":"2025-12-11T08:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.668955 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.669029 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.669047 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.669066 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.669078 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:18Z","lastTransitionTime":"2025-12-11T08:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.772700 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.772763 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.772787 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.772816 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.772837 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:18Z","lastTransitionTime":"2025-12-11T08:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.874759 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.874812 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.874825 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.874845 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.874857 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:18Z","lastTransitionTime":"2025-12-11T08:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.979092 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.979135 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.979144 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.979160 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:18 crc kubenswrapper[4881]: I1211 08:17:18.979170 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:18Z","lastTransitionTime":"2025-12-11T08:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.004912 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.005046 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 08:17:19 crc kubenswrapper[4881]: E1211 08:17:19.005678 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:19 crc kubenswrapper[4881]: E1211 08:17:19.005799 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.005274 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:19 crc kubenswrapper[4881]: E1211 08:17:19.006286 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.082395 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.082435 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.082443 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.082458 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.082467 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:19Z","lastTransitionTime":"2025-12-11T08:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.186766 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.186847 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.186862 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.186884 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.186900 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:19Z","lastTransitionTime":"2025-12-11T08:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.290739 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.290815 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.290834 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.290861 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.290880 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:19Z","lastTransitionTime":"2025-12-11T08:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.394832 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.394902 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.394928 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.394962 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.394987 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:19Z","lastTransitionTime":"2025-12-11T08:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.499068 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.499128 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.499146 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.499170 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.499188 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:19Z","lastTransitionTime":"2025-12-11T08:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.602384 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.602445 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.602462 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.602483 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.602497 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:19Z","lastTransitionTime":"2025-12-11T08:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.705860 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.705919 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.705936 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.705959 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.705977 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:19Z","lastTransitionTime":"2025-12-11T08:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.808795 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.808845 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.808860 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.808878 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.808892 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:19Z","lastTransitionTime":"2025-12-11T08:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.910760 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.910796 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.910806 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.910820 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:19 crc kubenswrapper[4881]: I1211 08:17:19.910828 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:19Z","lastTransitionTime":"2025-12-11T08:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.005109 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:20 crc kubenswrapper[4881]: E1211 08:17:20.005299 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.006459 4881 scope.go:117] "RemoveContainer" containerID="6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.012793 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.012844 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.012853 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.012868 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.012881 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:20Z","lastTransitionTime":"2025-12-11T08:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.115594 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.115641 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.115655 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.115676 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.115690 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:20Z","lastTransitionTime":"2025-12-11T08:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.218552 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.218599 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.218610 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.218627 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.218639 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:20Z","lastTransitionTime":"2025-12-11T08:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.321458 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.321516 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.321534 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.321559 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.321575 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:20Z","lastTransitionTime":"2025-12-11T08:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.425791 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.425856 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.425873 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.425896 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.425916 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:20Z","lastTransitionTime":"2025-12-11T08:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.528901 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.528965 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.528987 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.529018 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.529051 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:20Z","lastTransitionTime":"2025-12-11T08:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.631755 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.631849 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.631864 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.631883 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.631895 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:20Z","lastTransitionTime":"2025-12-11T08:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.734483 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.734551 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.734575 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.734617 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.734637 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:20Z","lastTransitionTime":"2025-12-11T08:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.825463 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:17:20 crc kubenswrapper[4881]: E1211 08:17:20.825734 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:24.825683599 +0000 UTC m=+153.203052336 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.838256 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.838300 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.838318 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.838368 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.838391 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:20Z","lastTransitionTime":"2025-12-11T08:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.927415 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.927480 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.927504 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.927533 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:20 crc kubenswrapper[4881]: E1211 08:17:20.927835 4881 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 08:17:20 crc kubenswrapper[4881]: E1211 08:17:20.927981 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:18:24.927944776 +0000 UTC m=+153.305313483 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 08:17:20 crc kubenswrapper[4881]: E1211 08:17:20.928459 4881 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 08:17:20 crc kubenswrapper[4881]: E1211 08:17:20.928525 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 08:17:20 crc kubenswrapper[4881]: E1211 08:17:20.928552 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 08:17:20 crc kubenswrapper[4881]: E1211 08:17:20.928569 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 08:17:20 crc kubenswrapper[4881]: E1211 08:17:20.928631 4881 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 08:17:20 crc kubenswrapper[4881]: E1211 08:17:20.928655 4881 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:17:20 crc kubenswrapper[4881]: E1211 08:17:20.928582 4881 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:17:20 crc kubenswrapper[4881]: E1211 08:17:20.928594 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:18:24.92856977 +0000 UTC m=+153.305938487 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 08:17:20 crc kubenswrapper[4881]: E1211 08:17:20.928800 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-11 08:18:24.928781905 +0000 UTC m=+153.306150772 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:17:20 crc kubenswrapper[4881]: E1211 08:17:20.928822 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-11 08:18:24.928811306 +0000 UTC m=+153.306180213 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.941700 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.941765 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.941789 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.941816 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:20 crc kubenswrapper[4881]: I1211 08:17:20.941843 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:20Z","lastTransitionTime":"2025-12-11T08:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.005410 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:21 crc kubenswrapper[4881]: E1211 08:17:21.005528 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.005645 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.005655 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:21 crc kubenswrapper[4881]: E1211 08:17:21.005802 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:21 crc kubenswrapper[4881]: E1211 08:17:21.005859 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.044814 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.044877 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.044917 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.044936 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.044949 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:21Z","lastTransitionTime":"2025-12-11T08:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.147281 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.147324 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.147357 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.147377 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.147390 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:21Z","lastTransitionTime":"2025-12-11T08:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.250624 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.250679 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.250694 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.250718 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.250733 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:21Z","lastTransitionTime":"2025-12-11T08:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.354188 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.354237 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.354254 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.354276 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.354287 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:21Z","lastTransitionTime":"2025-12-11T08:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.456663 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.456742 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.456754 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.456769 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.456779 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:21Z","lastTransitionTime":"2025-12-11T08:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.514146 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovnkube-controller/2.log" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.517484 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerStarted","Data":"4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b"} Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.559787 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.559839 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.559856 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.559878 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.559895 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:21Z","lastTransitionTime":"2025-12-11T08:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.662951 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.663007 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.663021 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.663041 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.663053 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:21Z","lastTransitionTime":"2025-12-11T08:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.765724 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.765753 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.765761 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.765772 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.765781 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:21Z","lastTransitionTime":"2025-12-11T08:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.868745 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.868789 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.868799 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.868814 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.868824 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:21Z","lastTransitionTime":"2025-12-11T08:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.971513 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.971564 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.971580 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.971604 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:21 crc kubenswrapper[4881]: I1211 08:17:21.971619 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:21Z","lastTransitionTime":"2025-12-11T08:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.004970 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:22 crc kubenswrapper[4881]: E1211 08:17:22.005173 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.074284 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.074375 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.074397 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.074428 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.074450 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:22Z","lastTransitionTime":"2025-12-11T08:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.177752 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.177817 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.177835 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.177860 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.177877 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:22Z","lastTransitionTime":"2025-12-11T08:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.280233 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.280295 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.280305 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.280327 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.280353 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:22Z","lastTransitionTime":"2025-12-11T08:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.384106 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.384173 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.384192 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.384219 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.384241 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:22Z","lastTransitionTime":"2025-12-11T08:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.486798 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.486847 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.486862 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.486883 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.486899 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:22Z","lastTransitionTime":"2025-12-11T08:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.523716 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovnkube-controller/3.log" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.524773 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovnkube-controller/2.log" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.528463 4881 generic.go:334] "Generic (PLEG): container finished" podID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerID="4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b" exitCode=1 Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.528520 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerDied","Data":"4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b"} Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.528555 4881 scope.go:117] "RemoveContainer" containerID="6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.529427 4881 scope.go:117] "RemoveContainer" containerID="4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b" Dec 11 08:17:22 crc kubenswrapper[4881]: E1211 08:17:22.529711 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.548242 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.563765 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.575243 4881 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.586552 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.589803 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" 
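The records above repeat a single readiness signature: the kubelet keeps publishing Ready=False with reason KubeletNotReady because the container runtime reports NetworkReady=false until a CNI network config appears in /etc/kubernetes/cni/net.d/. Below is a minimal Go sketch of that discovery step, assuming only that the runtime accepts the usual *.conf/*.conflist/*.json config names (the real matching logic lives in libcni and is not shown in this log):

// cnicheck.go -- editorial sketch, not part of the logged cluster.
// Approximates the check behind "no CNI configuration file in
// /etc/kubernetes/cni/net.d/": the runtime looks for at least one
// *.conf, *.conflist, or *.json network config in the conf dir.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func hasCNIConfig(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	dir := "/etc/kubernetes/cni/net.d" // path taken from the log above
	ok, err := hasCNIConfig(dir)
	if err != nil {
		fmt.Fprintln(os.Stderr, "read error:", err)
		os.Exit(1)
	}
	if !ok {
		// This is the state the kubelet keeps reporting as
		// NetworkReady=false / NetworkPluginNotReady above.
		fmt.Println("no CNI configuration file found in", dir)
		return
	}
	fmt.Println("CNI configuration present in", dir)
}

Once ovnkube-node writes its config (the 10-ovn-kubernetes.conf file that multus is shown polling for below), a check of this form passes and the NodeNotReady events stop.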
Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.589848 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.589862 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.589882 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.589899 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:22Z","lastTransitionTime":"2025-12-11T08:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.599056 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.611577 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.622655 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.636105 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://472f02e542c67bbd11145db9b59f2bae1dc688d45e95099b17a33fa1e27dbac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:17:08Z\\\",\\\"message\\\":\\\"2025-12-11T08:16:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_76e76f4f-e849-4b50-a2b6-debceb43335b\\\\n2025-12-11T08:16:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_76e76f4f-e849-4b50-a2b6-debceb43335b to /host/opt/cni/bin/\\\\n2025-12-11T08:16:23Z [verbose] multus-daemon started\\\\n2025-12-11T08:16:23Z [verbose] Readiness Indicator file check\\\\n2025-12-11T08:17:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:17:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.648415 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.660423 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c507af2-56f1-45a6-ab18-c6d27c4e3f85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae6cead982609bcea650eccda0c847ae1c568061104bda0584984b9d62481b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdda482f14afc374a3ba38a02b0b93bab4faf300cc47f7ee39ef9183af904f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2846dae5b0d424649d68bdbeb34b7791ee2a8e1b05a0309876cd0d79c5fca01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.668458 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.679635 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.692602 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.692642 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.692651 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.692665 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.692673 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:22Z","lastTransitionTime":"2025-12-11T08:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.693536 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c3d898f-552a-471b-affd-a0489efc782e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9251be56904cea5a0c24898fa10c30718080f3fe77804166d664b3f40235c80a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.719815 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"
Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa4
1ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:50Z\\\",\\\"message\\\":\\\"d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1211 08:16:50.354677 6553 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1211 08:16:50.354696 6553 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1211 08:16:50.354771 6553 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:17:22Z\\\",\\\"message\\\":\\\"s:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:9192, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1211 08:17:22.148449 7001 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r after 0 failed attempt(s)\\\\nF1211 08:17:22.148457 7001 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 
0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.735662 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-clus
ter-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.749781 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.760543 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.772284 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:22Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.795252 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.795324 4881 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.795363 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.795385 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.795401 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:22Z","lastTransitionTime":"2025-12-11T08:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.898378 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.898420 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.898429 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.898444 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:22 crc kubenswrapper[4881]: I1211 08:17:22.898454 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:22Z","lastTransitionTime":"2025-12-11T08:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.001616 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.001715 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.001749 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.001781 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.001805 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:23Z","lastTransitionTime":"2025-12-11T08:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.004327 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.004471 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:23 crc kubenswrapper[4881]: E1211 08:17:23.004680 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.004762 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:23 crc kubenswrapper[4881]: E1211 08:17:23.004816 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:23 crc kubenswrapper[4881]: E1211 08:17:23.004922 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.028412 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static
-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.043769 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.057925 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.072110 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.097959 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293b9bfc7cfe65f9d9c2cb46c40ba8aba252b7f4dfec7b3552d3a9271bfa25e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:16:50Z\\\",\\\"message\\\":\\\"d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1211 08:16:50.354677 6553 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1211 08:16:50.354696 6553 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1211 08:16:50.354771 6553 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:17:22Z\\\",\\\"message\\\":\\\"s:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:9192, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1211 08:17:22.148449 7001 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r after 0 failed attempt(s)\\\\nF1211 08:17:22.148457 7001 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 
0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:17:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.104318 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.104372 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.104382 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.104397 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.104407 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:23Z","lastTransitionTime":"2025-12-11T08:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.111449 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.130584 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.144033 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.159551 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.171132 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://472f02e542c67bbd11145db9b59f2bae1dc688d45e95099b17a33fa1e27dbac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:17:08Z\\\",\\\"message\\\":\\\"2025-12-11T08:16:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_76e76f4f-e849-4b50-a2b6-debceb43335b\\\\n2025-12-11T08:16:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_76e76f4f-e849-4b50-a2b6-debceb43335b to /host/opt/cni/bin/\\\\n2025-12-11T08:16:23Z [verbose] multus-daemon started\\\\n2025-12-11T08:16:23Z [verbose] Readiness Indicator file check\\\\n2025-12-11T08:17:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:17:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.185025 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.195830 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.205031 4881 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.206522 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.206569 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.206579 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.206595 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.206607 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:23Z","lastTransitionTime":"2025-12-11T08:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.216172 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220
d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.228427 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c507af2-56f1-45a6-ab18-c6d27c4e3f85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae6cead982609bcea650eccda0c847ae1c568061104bda0584984b9d62481b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdda482f14afc374a3ba38a02b0b93bab4faf300cc47f7ee39ef9183af904f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2846dae5b0d424649d68bdbeb34b7791ee2a8e1b05a0309876cd0d79c5fca01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.238692 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.247086 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.257211 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c3d898f-552a-471b-affd-a0489efc782e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9251be56904cea5a0c24898fa10c30718080f3fe77804166d664b3f40235c80a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.309267 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.309330 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.309363 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.309390 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.309402 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:23Z","lastTransitionTime":"2025-12-11T08:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.412030 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.412080 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.412094 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.412108 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.412117 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:23Z","lastTransitionTime":"2025-12-11T08:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.514956 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.515208 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.515321 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.515480 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.515580 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:23Z","lastTransitionTime":"2025-12-11T08:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.534854 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovnkube-controller/3.log" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.539819 4881 scope.go:117] "RemoveContainer" containerID="4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b" Dec 11 08:17:23 crc kubenswrapper[4881]: E1211 08:17:23.540082 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.556612 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c3d898f-552a-471b-affd-a0489efc782e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9251be56904cea5a0c24898fa10c30718080f3fe77804166d664b3f40235c80a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff\\\",\\\"exitCode\\\":0,\
\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.580889 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.598235 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.613721 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.618502 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.618550 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.618567 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.618590 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.618604 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:23Z","lastTransitionTime":"2025-12-11T08:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.640114 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:17:22Z\\\",\\\"message\\\":\\\"s:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:9192, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1211 08:17:22.148449 7001 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r after 0 failed attempt(s)\\\\nF1211 08:17:22.148457 7001 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 
0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:17:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursive
ReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.674088 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.714833 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.721181 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.721220 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.721228 4881 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.721243 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.721252 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:23Z","lastTransitionTime":"2025-12-11T08:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.729650 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.741702 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://472f02e542c67bbd11145db9b59f2bae1dc688d45e95099b17a33fa1e27dbac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:17:08Z\\\",\\\"message\\\":\\\"2025-12-11T08:16:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_76e76f4f-e849-4b50-a2b6-debceb43335b\\\\n2025-12-11T08:16:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_76e76f4f-e849-4b50-a2b6-debceb43335b to /host/opt/cni/bin/\\\\n2025-12-11T08:16:23Z [verbose] multus-daemon started\\\\n2025-12-11T08:16:23Z [verbose] Readiness Indicator file check\\\\n2025-12-11T08:17:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:17:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.754422 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.764787 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.778643 4881 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.792596 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.806047 4881 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.817453 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c507af2-56f1-45a6-ab18-c6d27c4e3f85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae6cead982609bcea650eccda0c847ae1c568061104bda0584984b9d62481b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdda482f14afc374a3ba38a02b0b93bab4faf300cc47f7ee39ef9183af904f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2846dae5b0d424649d68bdbeb34b7791ee2a8e1b05a0309876cd0d79c5fca01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.824231 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.824267 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.824277 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.824293 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.824304 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:23Z","lastTransitionTime":"2025-12-11T08:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.828276 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.840815 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.860888 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:23Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.928784 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.928833 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.928847 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.928889 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:23 crc kubenswrapper[4881]: I1211 08:17:23.928906 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:23Z","lastTransitionTime":"2025-12-11T08:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.005007 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:24 crc kubenswrapper[4881]: E1211 08:17:24.005186 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.032412 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.032505 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.032525 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.032552 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.032571 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:24Z","lastTransitionTime":"2025-12-11T08:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.080079 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.135642 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.135681 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.135690 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.135704 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.135713 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:24Z","lastTransitionTime":"2025-12-11T08:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.237838 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.237884 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.237897 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.237916 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.237929 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:24Z","lastTransitionTime":"2025-12-11T08:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.340775 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.340807 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.340817 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.340834 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.340845 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:24Z","lastTransitionTime":"2025-12-11T08:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.444031 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.444090 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.444101 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.444119 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.444133 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:24Z","lastTransitionTime":"2025-12-11T08:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.543207 4881 scope.go:117] "RemoveContainer" containerID="4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b" Dec 11 08:17:24 crc kubenswrapper[4881]: E1211 08:17:24.543490 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.546998 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.547064 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.547087 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.547114 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.547136 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:24Z","lastTransitionTime":"2025-12-11T08:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.650439 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.650552 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.650608 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.650636 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.650656 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:24Z","lastTransitionTime":"2025-12-11T08:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.754028 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.754470 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.754670 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.754865 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.755016 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:24Z","lastTransitionTime":"2025-12-11T08:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.857810 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.858060 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.858143 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.858271 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.858395 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:24Z","lastTransitionTime":"2025-12-11T08:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.960864 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.960924 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.960940 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.960959 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:24 crc kubenswrapper[4881]: I1211 08:17:24.960971 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:24Z","lastTransitionTime":"2025-12-11T08:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.004658 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.004709 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:25 crc kubenswrapper[4881]: E1211 08:17:25.004832 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.004846 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:25 crc kubenswrapper[4881]: E1211 08:17:25.004929 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:25 crc kubenswrapper[4881]: E1211 08:17:25.004997 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.063771 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.064039 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.064107 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.064183 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.064243 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:25Z","lastTransitionTime":"2025-12-11T08:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.109601 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.109672 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.109689 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.109715 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.109731 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:25Z","lastTransitionTime":"2025-12-11T08:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:25 crc kubenswrapper[4881]: E1211 08:17:25.127435 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.132722 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.132777 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.132789 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.132808 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.132822 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:25Z","lastTransitionTime":"2025-12-11T08:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:25 crc kubenswrapper[4881]: E1211 08:17:25.152897 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.158225 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.158316 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.158359 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.158393 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.158414 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:25Z","lastTransitionTime":"2025-12-11T08:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:25 crc kubenswrapper[4881]: E1211 08:17:25.176566 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.180773 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.180871 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.180992 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.181081 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.181140 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:25Z","lastTransitionTime":"2025-12-11T08:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:25 crc kubenswrapper[4881]: E1211 08:17:25.195827 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.200393 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.200480 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.200499 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.200523 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.200572 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:25Z","lastTransitionTime":"2025-12-11T08:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:25 crc kubenswrapper[4881]: E1211 08:17:25.225216 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:25Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:25 crc kubenswrapper[4881]: E1211 08:17:25.225366 4881 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.226666 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.226696 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.226705 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.226719 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.226730 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:25Z","lastTransitionTime":"2025-12-11T08:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.329930 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.329970 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.329981 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.330023 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.330035 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:25Z","lastTransitionTime":"2025-12-11T08:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.432114 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.432171 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.432188 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.432214 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.432232 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:25Z","lastTransitionTime":"2025-12-11T08:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.535008 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.535093 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.535104 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.535125 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.535138 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:25Z","lastTransitionTime":"2025-12-11T08:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.638421 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.638499 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.638562 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.638598 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.638618 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:25Z","lastTransitionTime":"2025-12-11T08:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.741895 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.741937 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.741946 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.741960 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.741968 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:25Z","lastTransitionTime":"2025-12-11T08:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.844916 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.844971 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.844982 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.844999 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.845010 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:25Z","lastTransitionTime":"2025-12-11T08:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.948460 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.948503 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.948511 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.948525 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:25 crc kubenswrapper[4881]: I1211 08:17:25.948535 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:25Z","lastTransitionTime":"2025-12-11T08:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.004486 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:26 crc kubenswrapper[4881]: E1211 08:17:26.004677 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.051958 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.052025 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.052042 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.052067 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.052085 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:26Z","lastTransitionTime":"2025-12-11T08:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.154897 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.154955 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.154971 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.154996 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.155011 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:26Z","lastTransitionTime":"2025-12-11T08:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.257954 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.258016 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.258034 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.258058 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.258075 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:26Z","lastTransitionTime":"2025-12-11T08:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.361993 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.362072 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.362090 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.362115 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.362133 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:26Z","lastTransitionTime":"2025-12-11T08:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.464895 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.464935 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.464947 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.464963 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.464973 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:26Z","lastTransitionTime":"2025-12-11T08:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.571245 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.571325 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.571362 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.571383 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.571400 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:26Z","lastTransitionTime":"2025-12-11T08:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.673790 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.673834 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.673847 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.673910 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.673927 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:26Z","lastTransitionTime":"2025-12-11T08:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.776642 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.776694 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.776709 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.776723 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.776733 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:26Z","lastTransitionTime":"2025-12-11T08:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.879757 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.879802 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.879814 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.879831 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.879842 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:26Z","lastTransitionTime":"2025-12-11T08:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.982741 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.982824 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.982846 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.982876 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:26 crc kubenswrapper[4881]: I1211 08:17:26.982899 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:26Z","lastTransitionTime":"2025-12-11T08:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.004859 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.004908 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.005050 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:27 crc kubenswrapper[4881]: E1211 08:17:27.005248 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:27 crc kubenswrapper[4881]: E1211 08:17:27.005406 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:27 crc kubenswrapper[4881]: E1211 08:17:27.005990 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.028605 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.086025 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.086070 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.086100 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.086117 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.086131 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:27Z","lastTransitionTime":"2025-12-11T08:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.189308 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.189421 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.189436 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.189457 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.189473 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:27Z","lastTransitionTime":"2025-12-11T08:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.292113 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.292158 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.292172 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.292194 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.292208 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:27Z","lastTransitionTime":"2025-12-11T08:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.395004 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.395065 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.395077 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.395094 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.395106 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:27Z","lastTransitionTime":"2025-12-11T08:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.498128 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.498193 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.498209 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.498241 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.498258 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:27Z","lastTransitionTime":"2025-12-11T08:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.602046 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.602133 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.602152 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.602177 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.602194 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:27Z","lastTransitionTime":"2025-12-11T08:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.704217 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.704259 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.704268 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.704283 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.704292 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:27Z","lastTransitionTime":"2025-12-11T08:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.807201 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.807242 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.807253 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.807272 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.807283 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:27Z","lastTransitionTime":"2025-12-11T08:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.913848 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.913903 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.913925 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.913950 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:27 crc kubenswrapper[4881]: I1211 08:17:27.913974 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:27Z","lastTransitionTime":"2025-12-11T08:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:28 crc kubenswrapper[4881]: I1211 08:17:28.005048 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:28 crc kubenswrapper[4881]: E1211 08:17:28.005187 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:28 crc kubenswrapper[4881]: I1211 08:17:28.017229 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:28 crc kubenswrapper[4881]: I1211 08:17:28.017266 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:28 crc kubenswrapper[4881]: I1211 08:17:28.017278 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:28 crc kubenswrapper[4881]: I1211 08:17:28.017292 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:28 crc kubenswrapper[4881]: I1211 08:17:28.017304 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:28Z","lastTransitionTime":"2025-12-11T08:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 11 08:17:28 crc kubenswrapper[4881]: I1211 08:17:28.120904 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:28 crc kubenswrapper[4881]: I1211 08:17:28.120968 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:28 crc kubenswrapper[4881]: I1211 08:17:28.120985 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:28 crc kubenswrapper[4881]: I1211 08:17:28.121006 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:28 crc kubenswrapper[4881]: I1211 08:17:28.121021 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:28Z","lastTransitionTime":"2025-12-11T08:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
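[editor's note] The status updates above all carry the same root cause: the kubelet finds no CNI configuration in /etc/kubernetes/cni/net.d/, so it keeps the node's Ready condition at "False" until the network provider writes one. For orientation only, below is a minimal sketch of the kind of conflist file the kubelet's check looks for in that directory; the file name, network name, and the choice of the reference bridge/host-local plugins are illustrative assumptions, not taken from this log (on an OpenShift cluster this file is normally written by the cluster network operator once it starts).

  # /etc/kubernetes/cni/net.d/10-example.conflist -- hypothetical example, not from this log
  {
    "cniVersion": "0.4.0",
    "name": "example-net",
    "plugins": [
      {
        "type": "bridge",
        "bridge": "cni0",
        "ipam": { "type": "host-local", "subnet": "10.88.0.0/16" }
      }
    ]
  }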
Dec 11 08:17:29 crc kubenswrapper[4881]: I1211 08:17:29.005403 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:17:29 crc kubenswrapper[4881]: I1211 08:17:29.005554 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm"
Dec 11 08:17:29 crc kubenswrapper[4881]: I1211 08:17:29.005582 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 08:17:29 crc kubenswrapper[4881]: E1211 08:17:29.005736 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 11 08:17:29 crc kubenswrapper[4881]: E1211 08:17:29.005838 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb"
Dec 11 08:17:29 crc kubenswrapper[4881]: E1211 08:17:29.006082 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 08:17:29 crc kubenswrapper[4881]: I1211 08:17:29.048590 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:29 crc kubenswrapper[4881]: I1211 08:17:29.048672 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:29 crc kubenswrapper[4881]: I1211 08:17:29.048696 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:29 crc kubenswrapper[4881]: I1211 08:17:29.048730 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:29 crc kubenswrapper[4881]: I1211 08:17:29.048753 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:29Z","lastTransitionTime":"2025-12-11T08:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:30 crc kubenswrapper[4881]: I1211 08:17:30.004983 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 08:17:30 crc kubenswrapper[4881]: E1211 08:17:30.005488 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 11 08:17:30 crc kubenswrapper[4881]: I1211 08:17:30.081154 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:30 crc kubenswrapper[4881]: I1211 08:17:30.081319 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:30 crc kubenswrapper[4881]: I1211 08:17:30.081425 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:30 crc kubenswrapper[4881]: I1211 08:17:30.081501 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:30 crc kubenswrapper[4881]: I1211 08:17:30.081529 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:30Z","lastTransitionTime":"2025-12-11T08:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:31 crc kubenswrapper[4881]: I1211 08:17:31.004797 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 08:17:31 crc kubenswrapper[4881]: I1211 08:17:31.004826 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:17:31 crc kubenswrapper[4881]: I1211 08:17:31.004938 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm"
Dec 11 08:17:31 crc kubenswrapper[4881]: E1211 08:17:31.005136 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 08:17:31 crc kubenswrapper[4881]: E1211 08:17:31.005244 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 11 08:17:31 crc kubenswrapper[4881]: E1211 08:17:31.005618 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb"
Dec 11 08:17:31 crc kubenswrapper[4881]: I1211 08:17:31.009871 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:31 crc kubenswrapper[4881]: I1211 08:17:31.009942 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:31 crc kubenswrapper[4881]: I1211 08:17:31.009967 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:31 crc kubenswrapper[4881]: I1211 08:17:31.009996 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:31 crc kubenswrapper[4881]: I1211 08:17:31.010018 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:31Z","lastTransitionTime":"2025-12-11T08:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:32 crc kubenswrapper[4881]: I1211 08:17:32.005084 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 08:17:32 crc kubenswrapper[4881]: E1211 08:17:32.005255 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 11 08:17:32 crc kubenswrapper[4881]: I1211 08:17:32.041448 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 08:17:32 crc kubenswrapper[4881]: I1211 08:17:32.041483 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 08:17:32 crc kubenswrapper[4881]: I1211 08:17:32.041493 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 08:17:32 crc kubenswrapper[4881]: I1211 08:17:32.041509 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 08:17:32 crc kubenswrapper[4881]: I1211 08:17:32.041519 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:32Z","lastTransitionTime":"2025-12-11T08:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.004811 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm"
Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.005034 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:17:33 crc kubenswrapper[4881]: E1211 08:17:33.005218 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb"
Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.005234 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 08:17:33 crc kubenswrapper[4881]: E1211 08:17:33.005418 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 11 08:17:33 crc kubenswrapper[4881]: E1211 08:17:33.005642 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.043586 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6aee5a89-07ec-4856-8355-8577073ba5d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e9bba95e748021cbf04000b3b00e27d8f75cd0d9e2409761d8a045c6eb3e28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6db11aa4e0b3067beae81e57c640307f34554f8a692c588951dab34fc753921d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbd836df49f273865e522c55b4d5e453be84c1ed5e8276a17f82b03e74521df3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://968879a7801be5095d46d91a9fbbf2e5d951fda34f49e034cc84262231cbea44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b11c3ebb26dbe8ee0a22dec8ee186c2dd013cc3566a9bd14a49cc7e24c2213d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba57aaaaf5a0258b44859937fd4deececf010c96c7753f4b97e69ae2b11584e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":
\\\"cri-o://ba57aaaaf5a0258b44859937fd4deececf010c96c7753f4b97e69ae2b11584e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://16c230355458e43c12c58fbd75de648c7f553961e1202314e793170f3fb03fb3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16c230355458e43c12c58fbd75de648c7f553961e1202314e793170f3fb03fb3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://734b87f641d1c4082b0611017a6e2d2a1b3ae995238cde641391325f8caaafe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://734b87f641d1c4082b0611017a6e2d2a1b3ae995238cde641391325f8caaafe9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.056937 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c3d898f-552a-471b-affd-a0489efc782e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9251be56904cea5a0c24898fa10c30718080f3fe77804166d664b3f40235c80a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.073512 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.073584 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.073600 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.073619 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.073632 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:33Z","lastTransitionTime":"2025-12-11T08:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.079016 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4613d67b59382894af01ae31e8f1a60355dd4362
ea6499cdc10741d2abfd078b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:17:22Z\\\",\\\"message\\\":\\\"s:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:9192, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1211 08:17:22.148449 7001 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r after 0 failed attempt(s)\\\\nF1211 08:17:22.148457 7001 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:17:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.093471 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.108569 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.124569 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.142315 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.160877 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.174260 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.176722 4881 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.176749 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.176759 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.176773 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.176782 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:33Z","lastTransitionTime":"2025-12-11T08:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.187710 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.201301 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.214736 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.231696 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.245805 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.262562 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://472f02e542c67bbd11145db9b59f2bae1dc688d45e95099b17a33fa1e27dbac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:17:08Z\\\",\\\"message\\\":\\\"2025-12-11T08:16:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_76e76f4f-e849-4b50-a2b6-debceb43335b\\\\n2025-12-11T08:16:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_76e76f4f-e849-4b50-a2b6-debceb43335b to /host/opt/cni/bin/\\\\n2025-12-11T08:16:23Z [verbose] multus-daemon started\\\\n2025-12-11T08:16:23Z [verbose] Readiness Indicator file check\\\\n2025-12-11T08:17:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:17:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.280065 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.281232 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.281417 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.281554 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.281740 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.281762 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:33Z","lastTransitionTime":"2025-12-11T08:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.297161 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c507af2-56f1-45a6-ab18-c6d27c4e3f85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae6cead982609bcea650eccda0c847ae1c568061104bda0584984b9d62481b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdda482f14afc374a3ba38a02b0b93bab4faf300cc47f7ee39ef9183af904f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2846dae5b0d424649d68bdbeb34b7791ee2a8e1b05a0309876cd0d79c5fca01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.311779 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11
\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.325962 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:33Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.385186 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 
08:17:33.385240 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.385257 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.385280 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.385297 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:33Z","lastTransitionTime":"2025-12-11T08:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.487715 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.487779 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.487798 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.487824 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.487842 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:33Z","lastTransitionTime":"2025-12-11T08:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.590615 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.590649 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.590661 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.590676 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.590687 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:33Z","lastTransitionTime":"2025-12-11T08:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.694067 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.694121 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.694138 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.694161 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.694177 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:33Z","lastTransitionTime":"2025-12-11T08:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.797566 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.797646 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.797683 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.797782 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.797800 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:33Z","lastTransitionTime":"2025-12-11T08:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.901038 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.901106 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.901129 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.901160 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:33 crc kubenswrapper[4881]: I1211 08:17:33.901199 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:33Z","lastTransitionTime":"2025-12-11T08:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.004437 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:34 crc kubenswrapper[4881]: E1211 08:17:34.004710 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.004786 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.004878 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.004904 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.004992 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.005063 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:34Z","lastTransitionTime":"2025-12-11T08:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.107901 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.107969 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.108008 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.108039 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.108059 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:34Z","lastTransitionTime":"2025-12-11T08:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.210731 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.210785 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.210799 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.210822 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.210836 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:34Z","lastTransitionTime":"2025-12-11T08:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.313429 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.313499 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.313521 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.313551 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.313574 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:34Z","lastTransitionTime":"2025-12-11T08:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.416239 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.416320 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.416399 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.416435 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.416460 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:34Z","lastTransitionTime":"2025-12-11T08:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.520191 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.520270 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.520294 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.520363 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.520388 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:34Z","lastTransitionTime":"2025-12-11T08:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.623756 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.623815 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.623831 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.623857 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.623875 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:34Z","lastTransitionTime":"2025-12-11T08:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.727380 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.727454 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.727475 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.727507 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.727532 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:34Z","lastTransitionTime":"2025-12-11T08:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.830460 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.830542 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.830561 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.830586 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.830603 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:34Z","lastTransitionTime":"2025-12-11T08:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.946813 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.946876 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.946899 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.946928 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:34 crc kubenswrapper[4881]: I1211 08:17:34.946953 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:34Z","lastTransitionTime":"2025-12-11T08:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.004455 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.004529 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:35 crc kubenswrapper[4881]: E1211 08:17:35.004660 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.004757 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:35 crc kubenswrapper[4881]: E1211 08:17:35.004893 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:35 crc kubenswrapper[4881]: E1211 08:17:35.005008 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.049685 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.049733 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.049742 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.049760 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.049772 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:35Z","lastTransitionTime":"2025-12-11T08:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.153735 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.153789 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.153801 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.153823 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.153837 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:35Z","lastTransitionTime":"2025-12-11T08:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.257447 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.257517 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.257540 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.257568 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.257589 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:35Z","lastTransitionTime":"2025-12-11T08:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.361967 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.362411 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.362447 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.362478 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.362504 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:35Z","lastTransitionTime":"2025-12-11T08:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.466427 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.466490 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.466507 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.466529 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.466548 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:35Z","lastTransitionTime":"2025-12-11T08:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.570026 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.570082 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.570099 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.570122 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.570140 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:35Z","lastTransitionTime":"2025-12-11T08:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.580327 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.580424 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.580442 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.580464 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.580479 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:35Z","lastTransitionTime":"2025-12-11T08:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:35 crc kubenswrapper[4881]: E1211 08:17:35.602433 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:35Z is after 
2025-08-24T17:21:41Z" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.608959 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.609123 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.609156 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.609187 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.609210 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:35Z","lastTransitionTime":"2025-12-11T08:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:35 crc kubenswrapper[4881]: E1211 08:17:35.627817 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:35Z is after 
2025-08-24T17:21:41Z" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.633660 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.633704 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.633714 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.633732 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.633741 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:35Z","lastTransitionTime":"2025-12-11T08:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:35 crc kubenswrapper[4881]: E1211 08:17:35.650692 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:35Z is after 
2025-08-24T17:21:41Z" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.655865 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.655962 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.655987 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.656018 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.656040 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:35Z","lastTransitionTime":"2025-12-11T08:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:35 crc kubenswrapper[4881]: E1211 08:17:35.673423 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:35Z is after 
2025-08-24T17:21:41Z" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.678272 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.678318 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.678363 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.678398 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.678423 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:35Z","lastTransitionTime":"2025-12-11T08:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:35 crc kubenswrapper[4881]: E1211 08:17:35.699467 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:35Z is after 
2025-08-24T17:21:41Z" Dec 11 08:17:35 crc kubenswrapper[4881]: E1211 08:17:35.699758 4881 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.701519 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.701559 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.701567 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.701581 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.701590 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:35Z","lastTransitionTime":"2025-12-11T08:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.804308 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.804430 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.804454 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.804481 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.804504 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:35Z","lastTransitionTime":"2025-12-11T08:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.908193 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.908274 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.908299 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.908373 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:35 crc kubenswrapper[4881]: I1211 08:17:35.908398 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:35Z","lastTransitionTime":"2025-12-11T08:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.005026 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:36 crc kubenswrapper[4881]: E1211 08:17:36.005197 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.011380 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.011450 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.011471 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.011497 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.011515 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:36Z","lastTransitionTime":"2025-12-11T08:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.113988 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.114490 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.114741 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.114986 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.115219 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:36Z","lastTransitionTime":"2025-12-11T08:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.219293 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.219381 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.219392 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.219411 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.219425 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:36Z","lastTransitionTime":"2025-12-11T08:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.321769 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.321833 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.321850 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.321879 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.321900 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:36Z","lastTransitionTime":"2025-12-11T08:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.424698 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.424756 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.424779 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.424809 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.424830 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:36Z","lastTransitionTime":"2025-12-11T08:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.528078 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.528129 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.528141 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.528156 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.528167 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:36Z","lastTransitionTime":"2025-12-11T08:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.631240 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.631298 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.631316 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.631371 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.631389 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:36Z","lastTransitionTime":"2025-12-11T08:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.734936 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.734989 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.735008 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.735031 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.735047 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:36Z","lastTransitionTime":"2025-12-11T08:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.839032 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.839115 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.839135 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.839162 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.839181 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:36Z","lastTransitionTime":"2025-12-11T08:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.942049 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.942121 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.942148 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.942178 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:36 crc kubenswrapper[4881]: I1211 08:17:36.942199 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:36Z","lastTransitionTime":"2025-12-11T08:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.005372 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.005420 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.005377 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:37 crc kubenswrapper[4881]: E1211 08:17:37.005602 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:37 crc kubenswrapper[4881]: E1211 08:17:37.005704 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:37 crc kubenswrapper[4881]: E1211 08:17:37.005832 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.006939 4881 scope.go:117] "RemoveContainer" containerID="4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b" Dec 11 08:17:37 crc kubenswrapper[4881]: E1211 08:17:37.007375 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.045415 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.045507 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.045521 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.045544 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.045556 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:37Z","lastTransitionTime":"2025-12-11T08:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.148849 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.148900 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.148911 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.148931 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.148943 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:37Z","lastTransitionTime":"2025-12-11T08:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.251801 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.251872 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.251885 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.251912 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.251925 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:37Z","lastTransitionTime":"2025-12-11T08:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.356597 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.356674 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.356693 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.356720 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.356742 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:37Z","lastTransitionTime":"2025-12-11T08:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.459141 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.459195 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.459212 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.459235 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.459251 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:37Z","lastTransitionTime":"2025-12-11T08:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.562297 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.562378 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.562399 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.562421 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.562437 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:37Z","lastTransitionTime":"2025-12-11T08:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.665434 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.665480 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.665492 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.665510 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.665525 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:37Z","lastTransitionTime":"2025-12-11T08:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.767793 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.767859 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.767870 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.767894 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.767908 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:37Z","lastTransitionTime":"2025-12-11T08:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.871297 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.871732 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.871880 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.872049 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.872206 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:37Z","lastTransitionTime":"2025-12-11T08:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.976200 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.976261 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.976279 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.976314 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:37 crc kubenswrapper[4881]: I1211 08:17:37.976359 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:37Z","lastTransitionTime":"2025-12-11T08:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.005136 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:38 crc kubenswrapper[4881]: E1211 08:17:38.005327 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.079866 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.080419 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.080493 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.080692 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.080749 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:38Z","lastTransitionTime":"2025-12-11T08:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.184581 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.184686 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.184720 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.184753 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.184779 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:38Z","lastTransitionTime":"2025-12-11T08:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.288097 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.288158 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.288174 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.288203 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.288246 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:38Z","lastTransitionTime":"2025-12-11T08:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.392709 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.392763 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.392780 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.392802 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.392819 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:38Z","lastTransitionTime":"2025-12-11T08:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.495958 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.496032 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.496054 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.496083 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.496104 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:38Z","lastTransitionTime":"2025-12-11T08:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.598219 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.598280 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.598303 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.598367 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.598392 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:38Z","lastTransitionTime":"2025-12-11T08:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.701831 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.701891 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.701911 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.701934 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.701951 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:38Z","lastTransitionTime":"2025-12-11T08:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.806380 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.806436 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.806452 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.806473 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.806489 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:38Z","lastTransitionTime":"2025-12-11T08:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.910146 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.910196 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.910211 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.910232 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:38 crc kubenswrapper[4881]: I1211 08:17:38.910246 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:38Z","lastTransitionTime":"2025-12-11T08:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.005273 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.005409 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.005551 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:39 crc kubenswrapper[4881]: E1211 08:17:39.005719 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:39 crc kubenswrapper[4881]: E1211 08:17:39.005959 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:39 crc kubenswrapper[4881]: E1211 08:17:39.006152 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.012771 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.012821 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.012844 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.012870 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.012892 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:39Z","lastTransitionTime":"2025-12-11T08:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.115889 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.115951 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.116006 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.116037 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.116057 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:39Z","lastTransitionTime":"2025-12-11T08:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.197097 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs\") pod \"network-metrics-daemon-bzslm\" (UID: \"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\") " pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:39 crc kubenswrapper[4881]: E1211 08:17:39.197445 4881 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 08:17:39 crc kubenswrapper[4881]: E1211 08:17:39.197553 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs podName:3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb nodeName:}" failed. No retries permitted until 2025-12-11 08:18:43.197519988 +0000 UTC m=+171.574888725 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs") pod "network-metrics-daemon-bzslm" (UID: "3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.219583 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.219643 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.219665 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.219694 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.219716 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:39Z","lastTransitionTime":"2025-12-11T08:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.322886 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.322950 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.322966 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.322985 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.322999 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:39Z","lastTransitionTime":"2025-12-11T08:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.425777 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.425871 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.425890 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.425915 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.425936 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:39Z","lastTransitionTime":"2025-12-11T08:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.529203 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.529264 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.529285 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.529310 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.529357 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:39Z","lastTransitionTime":"2025-12-11T08:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.631811 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.631865 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.631883 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.631907 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.631925 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:39Z","lastTransitionTime":"2025-12-11T08:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.735537 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.735627 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.735667 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.735701 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.735769 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:39Z","lastTransitionTime":"2025-12-11T08:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.839178 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.839244 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.839266 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.839295 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.839316 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:39Z","lastTransitionTime":"2025-12-11T08:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.942010 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.942080 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.942103 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.942133 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:39 crc kubenswrapper[4881]: I1211 08:17:39.942155 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:39Z","lastTransitionTime":"2025-12-11T08:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.004562 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:40 crc kubenswrapper[4881]: E1211 08:17:40.004756 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.044766 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.044829 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.044850 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.044879 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.044901 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:40Z","lastTransitionTime":"2025-12-11T08:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.147125 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.147199 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.147223 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.147251 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.147273 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:40Z","lastTransitionTime":"2025-12-11T08:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.250592 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.250738 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.250763 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.250793 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.250814 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:40Z","lastTransitionTime":"2025-12-11T08:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.354439 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.354510 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.354531 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.354564 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.354581 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:40Z","lastTransitionTime":"2025-12-11T08:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.458257 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.458430 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.458454 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.458481 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.458498 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:40Z","lastTransitionTime":"2025-12-11T08:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.562181 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.562249 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.562269 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.562298 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.562318 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:40Z","lastTransitionTime":"2025-12-11T08:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.666227 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.666425 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.666504 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.666542 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.666615 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:40Z","lastTransitionTime":"2025-12-11T08:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.769468 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.769547 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.769564 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.769589 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.769606 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:40Z","lastTransitionTime":"2025-12-11T08:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.873624 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.873696 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.873715 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.873740 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.873763 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:40Z","lastTransitionTime":"2025-12-11T08:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.976320 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.976441 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.976467 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.976498 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:40 crc kubenswrapper[4881]: I1211 08:17:40.976521 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:40Z","lastTransitionTime":"2025-12-11T08:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.004652 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.004688 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:41 crc kubenswrapper[4881]: E1211 08:17:41.004895 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.004950 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:41 crc kubenswrapper[4881]: E1211 08:17:41.005169 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:41 crc kubenswrapper[4881]: E1211 08:17:41.005535 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.079486 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.079549 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.079567 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.079591 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.079610 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:41Z","lastTransitionTime":"2025-12-11T08:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.182672 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.182755 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.182788 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.182820 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.182845 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:41Z","lastTransitionTime":"2025-12-11T08:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.285479 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.285518 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.285529 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.285548 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.285561 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:41Z","lastTransitionTime":"2025-12-11T08:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.388385 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.388446 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.388467 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.388493 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.388513 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:41Z","lastTransitionTime":"2025-12-11T08:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.491976 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.492034 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.492048 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.492066 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.492079 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:41Z","lastTransitionTime":"2025-12-11T08:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.595302 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.595367 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.595379 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.595398 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.595410 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:41Z","lastTransitionTime":"2025-12-11T08:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.698450 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.698517 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.698536 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.698560 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.698579 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:41Z","lastTransitionTime":"2025-12-11T08:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.802236 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.802327 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.802413 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.802531 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.802625 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:41Z","lastTransitionTime":"2025-12-11T08:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.905973 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.906044 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.906065 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.906095 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:41 crc kubenswrapper[4881]: I1211 08:17:41.906116 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:41Z","lastTransitionTime":"2025-12-11T08:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.004688 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:42 crc kubenswrapper[4881]: E1211 08:17:42.004969 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.009195 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.009524 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.009689 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.009876 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.010080 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:42Z","lastTransitionTime":"2025-12-11T08:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.112843 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.113141 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.113412 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.113574 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.113761 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:42Z","lastTransitionTime":"2025-12-11T08:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.217501 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.217841 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.217992 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.218181 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.218313 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:42Z","lastTransitionTime":"2025-12-11T08:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.321664 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.321719 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.321736 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.321760 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.321777 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:42Z","lastTransitionTime":"2025-12-11T08:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.425659 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.426132 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.426323 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.426533 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.426685 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:42Z","lastTransitionTime":"2025-12-11T08:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.529740 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.529818 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.529837 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.529865 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.529882 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:42Z","lastTransitionTime":"2025-12-11T08:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.632993 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.633073 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.633095 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.633452 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.633720 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:42Z","lastTransitionTime":"2025-12-11T08:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.736900 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.736965 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.736987 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.737020 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.737044 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:42Z","lastTransitionTime":"2025-12-11T08:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.840084 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.840149 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.840169 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.840195 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.840213 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:42Z","lastTransitionTime":"2025-12-11T08:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.942719 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.942790 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.942809 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.942836 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:42 crc kubenswrapper[4881]: I1211 08:17:42.942855 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:42Z","lastTransitionTime":"2025-12-11T08:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.004418 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.004483 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:43 crc kubenswrapper[4881]: E1211 08:17:43.004596 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.004656 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:43 crc kubenswrapper[4881]: E1211 08:17:43.004718 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:43 crc kubenswrapper[4881]: E1211 08:17:43.004995 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.021712 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.036388 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g8jhd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"368e635e-0e63-4202-b9e4-4a3a85c6f30c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://472f02e542c67bbd11145db9b59f2bae1dc688d45e95099b17a33fa1e27dbac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:17:08Z\\\",\\\"message\\\":\\\"2025-12-11T08:16:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_76e76f4f-e849-4b50-a2b6-debceb43335b\\\\n2025-12-11T08:16:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_76e76f4f-e849-4b50-a2b6-debceb43335b to /host/opt/cni/bin/\\\\n2025-12-11T08:16:23Z [verbose] multus-daemon started\\\\n2025-12-11T08:16:23Z [verbose] Readiness Indicator file check\\\\n2025-12-11T08:17:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:17:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zrsnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g8jhd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.045698 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.045756 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.045767 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.045786 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.045798 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:43Z","lastTransitionTime":"2025-12-11T08:17:43Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.053707 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afd0cc21-e31c-47c9-a598-cd93dde96121\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f0031f6a7c33c9d1c538ec8384083ab5bc7b7fcda9c01c8e5c922a7e375a7e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44d5ca755969b2434115259f141a9f8f17da8e0c1bef5b61c48475e09dc91601\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\
\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c881494d5238565dfdb69e61ecdebb725f60a14c0c651e0bcc65cb2aff297a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a13ad4683fb404bf7d973528489e3c39e33c2991d86a80875dab067023626584\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b84c7832c1d1df15202c858e63e3d97319f2014f413f98a9d332a3f89a11210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bb6df865673b8073f0fa13525833ae978ff17856a9b4149d549e177bc5d9a494\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a6eb6e7ccd64f8d181fe104b44a4299f9b28d0409e46175da23144ef88e4129\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jkbr8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-cwhxk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.066786 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56d69133-af36-4cbd-af7d-3a58cc4dd8ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b73c9948845a28d7c3cb81e94d962a4f11ac061f4108a18f46d451a554e9adb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7rwh4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z9nnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.079236 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"71774618-27c8-499d-83d9-e88693c86758\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136c88f0cab9d7a47a8c5925592e8cc09b62e515258829cc096df9ad3bb30615\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48e4553e3eb00f1e725dd9cacf2b25360544772ef82d6846361a4b7c44c6244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6btfk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:
34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kqw5r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.088640 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bzslm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k86hj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bzslm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.099982 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.111607 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec4e8a28a722463111eeb961e6e80d1f8ef1b776c40fc212a2ca5aaf0c43b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eff6a04afb607c6248352793d1205ae80c194a77222a234d586925a8ca8cd00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.121657 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-847k7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1711214f-683a-4a2e-b9b8-f3fd2dd6dbc2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ad11e04da8a6d33e46ca8c5f28be920d52c7c2ce22b4539fc8b7b0b85edb425\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2txkz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-847k7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.134712 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-jzsv5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aacce2d2-7fd3-439a-b46b-51858d3de240\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6277956e6d4873b4105ce03e358e2857371feead7eb74e2b7412ad7ed4de486\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tlbfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-jzsv5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.148110 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.148163 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.148179 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.148199 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.148242 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:43Z","lastTransitionTime":"2025-12-11T08:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.151268 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e233ddb-7c1f-4b3d-a111-de8cdc7ccd18\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://221f8c696ca3c5ab5dc2025890b5279fd406a8dfdd4ea482bb12f0c5d304255d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cb4176d384609eb549c9f00057256bd34053ca3eca229c20cd250b93f87f910\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91541795f1dd997a0d977904a6a3cbeae8b66b8bb57cde8dac344d3703352e1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.167899 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c507af2-56f1-45a6-ab18-c6d27c4e3f85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae6cead982609bcea650eccda0c847ae1c568061104bda0584984b9d62481b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdda482f14afc374a3ba38a02b0b93bab4faf300cc47f7ee39ef9183af904f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2846dae5b0d424649d68bdbeb34b7791ee2a8e1b05a0309876cd0d79c5fca01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89d2f7f7873955f51814a4a24f42fc5fff4c2c08480d7ff81bfc18fd73102d7d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.193287 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6aee5a89-07ec-4856-8355-8577073ba5d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e9bba95e748021cbf04000b3b00e27d8f75cd0d9e2409761d8a045c6eb3e28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6db11aa4e0b3067beae81e57c640307f34554f8a692c588951dab34fc753921d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbd836df49f273865e522c55b4d5e453be84c1ed5e8276a17f82b03e74521df3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://968879a7801be5095d46d91a9fbbf2e5d951fda34f49e034cc84262231cbea44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b11c3ebb26dbe8ee0a22dec8ee186c2dd013cc3566a9bd14a49cc7e24c2213d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba57aaaaf5a0258b44859937fd4deececf010c96c7753f4b97e69ae2b11584e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be
8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba57aaaaf5a0258b44859937fd4deececf010c96c7753f4b97e69ae2b11584e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://16c230355458e43c12c58fbd75de648c7f553961e1202314e793170f3fb03fb3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16c230355458e43c12c58fbd75de648c7f553961e1202314e793170f3fb03fb3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://734b87f641d1c4082b0611017a6e2d2a1b3ae995238cde641391325f8caaafe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://734b87f641d1c4082b0611017a6e2d2a1b3ae995238cde641391325f8caaafe9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.203903 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9c3d898f-552a-471b-affd-a0489efc782e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9251be56904cea5a0c24898fa10c30718080f3fe77804166d664b3f40235c80a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3ddede9a1ba051ed4eca12d7cca1c3309b223ca8c01eb7d0a9499ab012531ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.217328 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.228733 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35b9fdb7a609d6a955abda7e49e7bceec55811d33e14295ba7258c0e085cc517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.246244 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f14cc110-e74f-4cb7-a998-041e3f9b537b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4613d67b59382894af01ae31e8f1a60355dd4362
ea6499cdc10741d2abfd078b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-11T08:17:22Z\\\",\\\"message\\\":\\\"s:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:9192, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1211 08:17:22.148449 7001 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r after 0 failed attempt(s)\\\\nF1211 08:17:22.148457 7001 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:17:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:16:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8rjwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:16:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wf8q8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.250289 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.250313 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.250321 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.250355 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.250365 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:43Z","lastTransitionTime":"2025-12-11T08:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.258723 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"712c62b1-8ccd-4aa3-bcf9-678e361454ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T08:15:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T08:16:03Z\\\",\\\"message\\\":\\\"W1211 08:16:02.420586 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1211 08:16:02.420959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1765440962 cert, and key in /tmp/serving-cert-2611071911/serving-signer.crt, /tmp/serving-cert-2611071911/serving-signer.key\\\\nI1211 08:16:03.011861 1 observer_polling.go:159] Starting file observer\\\\nW1211 08:16:03.024183 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1211 08:16:03.024367 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 08:16:03.024984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2611071911/tls.crt::/tmp/serving-cert-2611071911/tls.key\\\\\\\"\\\\nF1211 08:16:03.405647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T08:16:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:15:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T08:15:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T08:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T08:15:53Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.268935 4881 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T08:16:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02d46e407feeba96230ff44b31f513e09434cdc3378124322398587ebda11b4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T08:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:43Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.353675 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.353744 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.353768 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.353800 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.353822 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:43Z","lastTransitionTime":"2025-12-11T08:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.457232 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.457321 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.457408 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.457439 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.457462 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:43Z","lastTransitionTime":"2025-12-11T08:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.559680 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.559743 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.559764 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.559793 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.559816 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:43Z","lastTransitionTime":"2025-12-11T08:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.662371 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.662424 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.662440 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.662465 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.662480 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:43Z","lastTransitionTime":"2025-12-11T08:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.764967 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.765048 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.765071 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.765105 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.765128 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:43Z","lastTransitionTime":"2025-12-11T08:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.868732 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.868810 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.868848 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.868878 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.868896 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:43Z","lastTransitionTime":"2025-12-11T08:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.972031 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.972103 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.972121 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.972146 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:43 crc kubenswrapper[4881]: I1211 08:17:43.972167 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:43Z","lastTransitionTime":"2025-12-11T08:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.005480 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:44 crc kubenswrapper[4881]: E1211 08:17:44.005729 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.075585 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.075679 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.075706 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.075741 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.075766 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:44Z","lastTransitionTime":"2025-12-11T08:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.179258 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.179371 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.179396 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.179428 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.179455 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:44Z","lastTransitionTime":"2025-12-11T08:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.282438 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.282507 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.282525 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.282550 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.282571 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:44Z","lastTransitionTime":"2025-12-11T08:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.385484 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.385568 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.385592 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.385620 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.385637 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:44Z","lastTransitionTime":"2025-12-11T08:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.487624 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.487658 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.487666 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.487683 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.487692 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:44Z","lastTransitionTime":"2025-12-11T08:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.591224 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.591298 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.591383 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.591403 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.591412 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:44Z","lastTransitionTime":"2025-12-11T08:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.694754 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.694831 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.694863 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.694893 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.694913 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:44Z","lastTransitionTime":"2025-12-11T08:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.798030 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.798157 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.798232 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.798266 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.798289 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:44Z","lastTransitionTime":"2025-12-11T08:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.902267 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.902328 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.902385 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.902410 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:44 crc kubenswrapper[4881]: I1211 08:17:44.902427 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:44Z","lastTransitionTime":"2025-12-11T08:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.004420 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.004532 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:45 crc kubenswrapper[4881]: E1211 08:17:45.004626 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:45 crc kubenswrapper[4881]: E1211 08:17:45.004736 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.004852 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:45 crc kubenswrapper[4881]: E1211 08:17:45.005329 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.005425 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.005463 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.005478 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.005495 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.005509 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:45Z","lastTransitionTime":"2025-12-11T08:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.108029 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.108080 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.108092 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.108109 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.108147 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:45Z","lastTransitionTime":"2025-12-11T08:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.210831 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.210884 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.210895 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.210911 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.210923 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:45Z","lastTransitionTime":"2025-12-11T08:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.313812 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.313888 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.313901 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.313920 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.313932 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:45Z","lastTransitionTime":"2025-12-11T08:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.417470 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.417523 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.417538 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.417555 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.417567 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:45Z","lastTransitionTime":"2025-12-11T08:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.520970 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.521898 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.521928 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.521965 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.521987 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:45Z","lastTransitionTime":"2025-12-11T08:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.624916 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.624977 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.625012 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.625051 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.625074 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:45Z","lastTransitionTime":"2025-12-11T08:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.727662 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.727740 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.727763 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.727790 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.727811 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:45Z","lastTransitionTime":"2025-12-11T08:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.830419 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.830489 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.830511 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.830568 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.830592 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:45Z","lastTransitionTime":"2025-12-11T08:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.924513 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.924574 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.924585 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.924604 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.924616 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:45Z","lastTransitionTime":"2025-12-11T08:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:45 crc kubenswrapper[4881]: E1211 08:17:45.941104 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:45Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.943963 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.943992 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.944000 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.944013 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.944023 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:45Z","lastTransitionTime":"2025-12-11T08:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:45 crc kubenswrapper[4881]: E1211 08:17:45.954078 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:45Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.958832 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.958874 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.958883 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.958896 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.958905 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:45Z","lastTransitionTime":"2025-12-11T08:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:45 crc kubenswrapper[4881]: E1211 08:17:45.976290 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:45Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.980547 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.980862 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.980876 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.980895 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:45 crc kubenswrapper[4881]: I1211 08:17:45.980907 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:45Z","lastTransitionTime":"2025-12-11T08:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:45 crc kubenswrapper[4881]: E1211 08:17:45.997054 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:45Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.000519 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.000653 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.000735 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.000824 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.000912 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:46Z","lastTransitionTime":"2025-12-11T08:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.004680 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:46 crc kubenswrapper[4881]: E1211 08:17:46.004977 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:46 crc kubenswrapper[4881]: E1211 08:17:46.016585 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T08:17:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"46a6300e-52c1-447e-8230-6662e62288c7\\\",\\\"systemUUID\\\":\\\"fece3d29-5045-4c4f-98be-52739a921bd2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T08:17:46Z is after 2025-08-24T17:21:41Z" Dec 11 08:17:46 crc kubenswrapper[4881]: E1211 08:17:46.016801 4881 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.018768 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.018816 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.018835 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.018860 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.018877 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:46Z","lastTransitionTime":"2025-12-11T08:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.121405 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.121455 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.121470 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.121493 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.121509 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:46Z","lastTransitionTime":"2025-12-11T08:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.223591 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.223628 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.223638 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.223654 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.223668 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:46Z","lastTransitionTime":"2025-12-11T08:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.326598 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.326661 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.326680 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.326705 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.326723 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:46Z","lastTransitionTime":"2025-12-11T08:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.429831 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.429893 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.429907 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.429927 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.429942 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:46Z","lastTransitionTime":"2025-12-11T08:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.532029 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.532099 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.532116 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.532140 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.532161 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:46Z","lastTransitionTime":"2025-12-11T08:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.634642 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.634724 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.634736 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.634758 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.634770 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:46Z","lastTransitionTime":"2025-12-11T08:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.738130 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.738453 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.738568 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.738666 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.738781 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:46Z","lastTransitionTime":"2025-12-11T08:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.841903 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.841947 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.841958 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.841976 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.841988 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:46Z","lastTransitionTime":"2025-12-11T08:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.945816 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.945889 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.945926 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.945959 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:46 crc kubenswrapper[4881]: I1211 08:17:46.945982 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:46Z","lastTransitionTime":"2025-12-11T08:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.005125 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.005232 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.005328 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:47 crc kubenswrapper[4881]: E1211 08:17:47.005781 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:47 crc kubenswrapper[4881]: E1211 08:17:47.006412 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:47 crc kubenswrapper[4881]: E1211 08:17:47.006737 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.049082 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.049175 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.049199 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.049226 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.049248 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:47Z","lastTransitionTime":"2025-12-11T08:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.152789 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.152834 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.152843 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.152858 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.152868 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:47Z","lastTransitionTime":"2025-12-11T08:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.256132 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.256187 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.256202 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.256223 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.256238 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:47Z","lastTransitionTime":"2025-12-11T08:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.359180 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.359224 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.359235 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.359251 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.359262 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:47Z","lastTransitionTime":"2025-12-11T08:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.462031 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.462128 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.462150 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.462182 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.462208 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:47Z","lastTransitionTime":"2025-12-11T08:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.565651 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.565692 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.565703 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.565720 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.565731 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:47Z","lastTransitionTime":"2025-12-11T08:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.670005 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.670107 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.670141 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.670269 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.670313 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:47Z","lastTransitionTime":"2025-12-11T08:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.774921 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.774985 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.774996 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.775016 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.775032 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:47Z","lastTransitionTime":"2025-12-11T08:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.878858 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.878926 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.878944 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.878971 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.879043 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:47Z","lastTransitionTime":"2025-12-11T08:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.982036 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.982802 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.983242 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.983515 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:47 crc kubenswrapper[4881]: I1211 08:17:47.983667 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:47Z","lastTransitionTime":"2025-12-11T08:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.005738 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:48 crc kubenswrapper[4881]: E1211 08:17:48.006399 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.087067 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.087567 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.087746 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.087913 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.088059 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:48Z","lastTransitionTime":"2025-12-11T08:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.191444 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.191522 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.191544 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.191587 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.191604 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:48Z","lastTransitionTime":"2025-12-11T08:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.294092 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.294159 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.294179 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.294206 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.294228 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:48Z","lastTransitionTime":"2025-12-11T08:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.397501 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.397567 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.397594 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.397627 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.397650 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:48Z","lastTransitionTime":"2025-12-11T08:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.500751 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.500798 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.500807 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.500826 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.500837 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:48Z","lastTransitionTime":"2025-12-11T08:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.603520 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.603581 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.603599 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.603624 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.603642 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:48Z","lastTransitionTime":"2025-12-11T08:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.707143 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.707249 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.707271 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.707300 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.707326 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:48Z","lastTransitionTime":"2025-12-11T08:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.811457 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.811501 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.811513 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.811531 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.811544 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:48Z","lastTransitionTime":"2025-12-11T08:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.915371 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.915424 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.915443 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.915470 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:48 crc kubenswrapper[4881]: I1211 08:17:48.915488 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:48Z","lastTransitionTime":"2025-12-11T08:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.006031 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.006081 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.006038 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:49 crc kubenswrapper[4881]: E1211 08:17:49.006276 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:49 crc kubenswrapper[4881]: E1211 08:17:49.006526 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:49 crc kubenswrapper[4881]: E1211 08:17:49.006689 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.007171 4881 scope.go:117] "RemoveContainer" containerID="4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b" Dec 11 08:17:49 crc kubenswrapper[4881]: E1211 08:17:49.007553 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.018757 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.018844 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.018869 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.018900 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.018923 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:49Z","lastTransitionTime":"2025-12-11T08:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.122251 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.122320 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.122372 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.122402 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.122422 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:49Z","lastTransitionTime":"2025-12-11T08:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.225882 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.225947 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.225967 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.226030 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.226056 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:49Z","lastTransitionTime":"2025-12-11T08:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.328991 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.329033 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.329044 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.329061 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.329073 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:49Z","lastTransitionTime":"2025-12-11T08:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.431606 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.431656 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.431667 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.431684 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.431699 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:49Z","lastTransitionTime":"2025-12-11T08:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.534610 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.534690 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.534703 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.534721 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.534761 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:49Z","lastTransitionTime":"2025-12-11T08:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.637833 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.637902 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.637924 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.637949 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.637966 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:49Z","lastTransitionTime":"2025-12-11T08:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.741260 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.741308 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.741317 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.741360 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.741370 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:49Z","lastTransitionTime":"2025-12-11T08:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.844267 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.844396 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.844424 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.844451 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.844469 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:49Z","lastTransitionTime":"2025-12-11T08:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.948037 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.948095 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.948112 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.948139 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:49 crc kubenswrapper[4881]: I1211 08:17:49.948156 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:49Z","lastTransitionTime":"2025-12-11T08:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.004570 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:50 crc kubenswrapper[4881]: E1211 08:17:50.004718 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.051559 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.051632 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.051658 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.051690 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.051712 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:50Z","lastTransitionTime":"2025-12-11T08:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.154296 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.154418 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.154443 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.154475 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.154497 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:50Z","lastTransitionTime":"2025-12-11T08:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.258166 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.258252 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.258270 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.258296 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.258314 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:50Z","lastTransitionTime":"2025-12-11T08:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.361508 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.361588 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.361612 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.361646 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.361672 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:50Z","lastTransitionTime":"2025-12-11T08:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.464467 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.464531 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.464551 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.464575 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.464594 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:50Z","lastTransitionTime":"2025-12-11T08:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.567908 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.567953 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.567965 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.567984 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.567996 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:50Z","lastTransitionTime":"2025-12-11T08:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.670754 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.670804 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.670815 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.670833 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.670846 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:50Z","lastTransitionTime":"2025-12-11T08:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.773190 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.773270 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.773288 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.773306 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.773319 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:50Z","lastTransitionTime":"2025-12-11T08:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.876670 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.876727 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.876753 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.876786 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.876808 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:50Z","lastTransitionTime":"2025-12-11T08:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.980032 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.980102 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.980120 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.980147 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:50 crc kubenswrapper[4881]: I1211 08:17:50.980165 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:50Z","lastTransitionTime":"2025-12-11T08:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.004799 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.004933 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.005117 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:51 crc kubenswrapper[4881]: E1211 08:17:51.005276 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:51 crc kubenswrapper[4881]: E1211 08:17:51.005424 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:51 crc kubenswrapper[4881]: E1211 08:17:51.005783 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.083492 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.083545 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.083562 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.083586 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.083602 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:51Z","lastTransitionTime":"2025-12-11T08:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.186785 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.186856 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.186877 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.186905 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.186928 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:51Z","lastTransitionTime":"2025-12-11T08:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.289606 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.289657 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.289669 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.289690 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.289702 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:51Z","lastTransitionTime":"2025-12-11T08:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.392508 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.392548 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.392559 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.392577 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.392589 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:51Z","lastTransitionTime":"2025-12-11T08:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.495174 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.495240 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.495261 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.495291 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.495319 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:51Z","lastTransitionTime":"2025-12-11T08:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.598376 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.598473 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.598491 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.598516 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.598533 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:51Z","lastTransitionTime":"2025-12-11T08:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.701253 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.701329 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.701391 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.701421 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.701442 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:51Z","lastTransitionTime":"2025-12-11T08:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.804036 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.804094 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.804111 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.804134 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.804151 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:51Z","lastTransitionTime":"2025-12-11T08:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.907249 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.907286 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.907296 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.907312 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:51 crc kubenswrapper[4881]: I1211 08:17:51.907324 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:51Z","lastTransitionTime":"2025-12-11T08:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 08:17:52 crc kubenswrapper[4881]: I1211 08:17:52.004823 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:52 crc kubenswrapper[4881]: E1211 08:17:52.004968 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:52 crc kubenswrapper[4881]: I1211 08:17:52.010090 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:52 crc kubenswrapper[4881]: I1211 08:17:52.010150 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:52 crc kubenswrapper[4881]: I1211 08:17:52.010178 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:52 crc kubenswrapper[4881]: I1211 08:17:52.010226 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:52 crc kubenswrapper[4881]: I1211 08:17:52.010255 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:52Z","lastTransitionTime":"2025-12-11T08:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
[the same five-entry node-status block repeats with identical content at 08:17:52.113, .215, .318, .421, .524, .628, .731 and .834]
Dec 11 08:17:52 crc kubenswrapper[4881]: E1211 08:17:52.935953 4881 kubelet_node_status.go:497] "Node not becoming ready in time after startup"
Dec 11 08:17:53 crc kubenswrapper[4881]: I1211 08:17:53.005650 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:17:53 crc kubenswrapper[4881]: I1211 08:17:53.005700 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm"
Dec 11 08:17:53 crc kubenswrapper[4881]: I1211 08:17:53.005798 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 08:17:53 crc kubenswrapper[4881]: E1211 08:17:53.006048 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 11 08:17:53 crc kubenswrapper[4881]: E1211 08:17:53.006226 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb"
Dec 11 08:17:53 crc kubenswrapper[4881]: E1211 08:17:53.006415 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 08:17:53 crc kubenswrapper[4881]: I1211 08:17:53.062464 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=97.062445551 podStartE2EDuration="1m37.062445551s" podCreationTimestamp="2025-12-11 08:16:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:17:53.040680705 +0000 UTC m=+121.418049462" watchObservedRunningTime="2025-12-11 08:17:53.062445551 +0000 UTC m=+121.439814248"
Dec 11 08:17:53 crc kubenswrapper[4881]: E1211 08:17:53.115660 4881 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 11 08:17:53 crc kubenswrapper[4881]: I1211 08:17:53.142477 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podStartSLOduration=93.142453819 podStartE2EDuration="1m33.142453819s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:17:53.142085339 +0000 UTC m=+121.519454036" watchObservedRunningTime="2025-12-11 08:17:53.142453819 +0000 UTC m=+121.519822526"
Dec 11 08:17:53 crc kubenswrapper[4881]: I1211 08:17:53.154697 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kqw5r" podStartSLOduration=92.154676134 podStartE2EDuration="1m32.154676134s" podCreationTimestamp="2025-12-11 08:16:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:17:53.154533941 +0000 UTC m=+121.531902648" watchObservedRunningTime="2025-12-11 08:17:53.154676134 +0000 UTC m=+121.532044841"
Dec 11 08:17:53 crc kubenswrapper[4881]: I1211 08:17:53.218577 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-g8jhd" podStartSLOduration=93.218556761 podStartE2EDuration="1m33.218556761s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:17:53.217755622 +0000 UTC m=+121.595124359" watchObservedRunningTime="2025-12-11 08:17:53.218556761 +0000 UTC m=+121.595925458"
Dec 11 08:17:53 crc kubenswrapper[4881]: I1211 08:17:53.233956 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-cwhxk" podStartSLOduration=93.233938424 podStartE2EDuration="1m33.233938424s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:17:53.233600226 +0000 UTC m=+121.610968933" watchObservedRunningTime="2025-12-11 08:17:53.233938424 +0000 UTC m=+121.611307121"
Dec 11 08:17:53 crc kubenswrapper[4881]: I1211 08:17:53.250956 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=96.250941245 podStartE2EDuration="1m36.250941245s" podCreationTimestamp="2025-12-11 08:16:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:17:53.250585347 +0000 UTC m=+121.627954044" watchObservedRunningTime="2025-12-11 08:17:53.250941245 +0000 UTC m=+121.628309942"
Dec 11 08:17:53 crc kubenswrapper[4881]: I1211 08:17:53.263374 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=70.263358496 podStartE2EDuration="1m10.263358496s" podCreationTimestamp="2025-12-11 08:16:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:17:53.26310091 +0000 UTC m=+121.640469617" watchObservedRunningTime="2025-12-11 08:17:53.263358496 +0000 UTC m=+121.640727193"
Dec 11 08:17:53 crc kubenswrapper[4881]: I1211 08:17:53.273742 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-847k7" podStartSLOduration=93.273724367 podStartE2EDuration="1m33.273724367s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:17:53.273502872 +0000 UTC m=+121.650871579" watchObservedRunningTime="2025-12-11 08:17:53.273724367 +0000 UTC m=+121.651093064"
Dec 11 08:17:53 crc kubenswrapper[4881]: I1211 08:17:53.311501 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=26.311480851 podStartE2EDuration="26.311480851s" podCreationTimestamp="2025-12-11 08:17:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:17:53.310008125 +0000 UTC m=+121.687376832" watchObservedRunningTime="2025-12-11 08:17:53.311480851 +0000 UTC m=+121.688849548"
Dec 11 08:17:53 crc kubenswrapper[4881]: I1211 08:17:53.312003 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-jzsv5" podStartSLOduration=93.311996084 podStartE2EDuration="1m33.311996084s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:17:53.288042624 +0000 UTC m=+121.665411321" watchObservedRunningTime="2025-12-11 08:17:53.311996084 +0000 UTC m=+121.689364781"
Dec 11 08:17:54 crc kubenswrapper[4881]: I1211 08:17:54.005241 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 08:17:54 crc kubenswrapper[4881]: E1211 08:17:54.005497 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 11 08:17:55 crc kubenswrapper[4881]: I1211 08:17:55.005468 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 08:17:55 crc kubenswrapper[4881]: I1211 08:17:55.005576 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 08:17:55 crc kubenswrapper[4881]: I1211 08:17:55.005576 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm"
Dec 11 08:17:55 crc kubenswrapper[4881]: E1211 08:17:55.005767 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:55 crc kubenswrapper[4881]: E1211 08:17:55.005888 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:55 crc kubenswrapper[4881]: E1211 08:17:55.005996 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.004809 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:56 crc kubenswrapper[4881]: E1211 08:17:56.005030 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.381506 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.381579 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.381600 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.381630 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.381652 4881 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T08:17:56Z","lastTransitionTime":"2025-12-11T08:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.455024 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=47.454995654 podStartE2EDuration="47.454995654s" podCreationTimestamp="2025-12-11 08:17:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:17:53.321562445 +0000 UTC m=+121.698931152" watchObservedRunningTime="2025-12-11 08:17:56.454995654 +0000 UTC m=+124.832364361" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.456401 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh"] Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.457018 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.461212 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.461650 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.461886 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.464010 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.507961 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1245ea2a-6123-4afa-ad8d-e70cea197743-service-ca\") pod \"cluster-version-operator-5c965bbfc6-9l5jh\" (UID: \"1245ea2a-6123-4afa-ad8d-e70cea197743\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.508141 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1245ea2a-6123-4afa-ad8d-e70cea197743-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-9l5jh\" (UID: \"1245ea2a-6123-4afa-ad8d-e70cea197743\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.508329 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1245ea2a-6123-4afa-ad8d-e70cea197743-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-9l5jh\" (UID: \"1245ea2a-6123-4afa-ad8d-e70cea197743\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.508508 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1245ea2a-6123-4afa-ad8d-e70cea197743-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-9l5jh\" (UID: \"1245ea2a-6123-4afa-ad8d-e70cea197743\") " 
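The "Observed pod startup duration" entries above encode a simple subtraction: with no image pulls recorded (firstStartedPulling and lastFinishedPulling are the zero time), podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp. A quick Go cross-check of the kube-apiserver-crc figure from the 08:17:53 entry, illustrative only:

// slocheck.go - cross-checks podStartE2EDuration="1m37.062445551s" for
// kube-apiserver-crc: created 08:16:16, observed running 08:17:53.062445551.
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created, err := time.Parse(layout, "2025-12-11 08:16:16 +0000 UTC")
	if err != nil {
		panic(err)
	}
	running, err := time.Parse(layout, "2025-12-11 08:17:53.062445551 +0000 UTC")
	if err != nil {
		panic(err)
	}
	fmt.Println(running.Sub(created)) // prints 1m37.062445551s
}

The same arithmetic reproduces the 47.454995654s reported for kube-rbac-proxy-crio-crc just above (created 08:17:09, observed running 08:17:56.454995654).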
pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.508562 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1245ea2a-6123-4afa-ad8d-e70cea197743-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-9l5jh\" (UID: \"1245ea2a-6123-4afa-ad8d-e70cea197743\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.610183 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1245ea2a-6123-4afa-ad8d-e70cea197743-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-9l5jh\" (UID: \"1245ea2a-6123-4afa-ad8d-e70cea197743\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.610260 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1245ea2a-6123-4afa-ad8d-e70cea197743-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-9l5jh\" (UID: \"1245ea2a-6123-4afa-ad8d-e70cea197743\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.610402 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1245ea2a-6123-4afa-ad8d-e70cea197743-service-ca\") pod \"cluster-version-operator-5c965bbfc6-9l5jh\" (UID: \"1245ea2a-6123-4afa-ad8d-e70cea197743\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.610446 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1245ea2a-6123-4afa-ad8d-e70cea197743-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-9l5jh\" (UID: \"1245ea2a-6123-4afa-ad8d-e70cea197743\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.610470 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1245ea2a-6123-4afa-ad8d-e70cea197743-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-9l5jh\" (UID: \"1245ea2a-6123-4afa-ad8d-e70cea197743\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.610615 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1245ea2a-6123-4afa-ad8d-e70cea197743-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-9l5jh\" (UID: \"1245ea2a-6123-4afa-ad8d-e70cea197743\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.610590 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1245ea2a-6123-4afa-ad8d-e70cea197743-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-9l5jh\" (UID: \"1245ea2a-6123-4afa-ad8d-e70cea197743\") " 
pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.611713 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1245ea2a-6123-4afa-ad8d-e70cea197743-service-ca\") pod \"cluster-version-operator-5c965bbfc6-9l5jh\" (UID: \"1245ea2a-6123-4afa-ad8d-e70cea197743\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.620253 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1245ea2a-6123-4afa-ad8d-e70cea197743-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-9l5jh\" (UID: \"1245ea2a-6123-4afa-ad8d-e70cea197743\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.638899 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1245ea2a-6123-4afa-ad8d-e70cea197743-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-9l5jh\" (UID: \"1245ea2a-6123-4afa-ad8d-e70cea197743\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" Dec 11 08:17:56 crc kubenswrapper[4881]: I1211 08:17:56.827789 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" Dec 11 08:17:56 crc kubenswrapper[4881]: W1211 08:17:56.848666 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1245ea2a_6123_4afa_ad8d_e70cea197743.slice/crio-c8bdde70534de43bbcaa5ae7e9ce1b47e26ea9785945cf4d407281b1fb7df6ee WatchSource:0}: Error finding container c8bdde70534de43bbcaa5ae7e9ce1b47e26ea9785945cf4d407281b1fb7df6ee: Status 404 returned error can't find the container with id c8bdde70534de43bbcaa5ae7e9ce1b47e26ea9785945cf4d407281b1fb7df6ee Dec 11 08:17:57 crc kubenswrapper[4881]: I1211 08:17:57.005231 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:57 crc kubenswrapper[4881]: I1211 08:17:57.005358 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:57 crc kubenswrapper[4881]: E1211 08:17:57.005461 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:57 crc kubenswrapper[4881]: I1211 08:17:57.005511 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:57 crc kubenswrapper[4881]: E1211 08:17:57.005638 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:57 crc kubenswrapper[4881]: E1211 08:17:57.005737 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:17:57 crc kubenswrapper[4881]: I1211 08:17:57.671986 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" event={"ID":"1245ea2a-6123-4afa-ad8d-e70cea197743","Type":"ContainerStarted","Data":"fcbc248fc2970ce04f78f6d51f6c436862ab31cb1ef45198a8e05cacd85373c2"} Dec 11 08:17:57 crc kubenswrapper[4881]: I1211 08:17:57.672049 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" event={"ID":"1245ea2a-6123-4afa-ad8d-e70cea197743","Type":"ContainerStarted","Data":"c8bdde70534de43bbcaa5ae7e9ce1b47e26ea9785945cf4d407281b1fb7df6ee"} Dec 11 08:17:57 crc kubenswrapper[4881]: I1211 08:17:57.674386 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g8jhd_368e635e-0e63-4202-b9e4-4a3a85c6f30c/kube-multus/1.log" Dec 11 08:17:57 crc kubenswrapper[4881]: I1211 08:17:57.675324 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g8jhd_368e635e-0e63-4202-b9e4-4a3a85c6f30c/kube-multus/0.log" Dec 11 08:17:57 crc kubenswrapper[4881]: I1211 08:17:57.675424 4881 generic.go:334] "Generic (PLEG): container finished" podID="368e635e-0e63-4202-b9e4-4a3a85c6f30c" containerID="472f02e542c67bbd11145db9b59f2bae1dc688d45e95099b17a33fa1e27dbac8" exitCode=1 Dec 11 08:17:57 crc kubenswrapper[4881]: I1211 08:17:57.675506 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-g8jhd" event={"ID":"368e635e-0e63-4202-b9e4-4a3a85c6f30c","Type":"ContainerDied","Data":"472f02e542c67bbd11145db9b59f2bae1dc688d45e95099b17a33fa1e27dbac8"} Dec 11 08:17:57 crc kubenswrapper[4881]: I1211 08:17:57.675601 4881 scope.go:117] "RemoveContainer" containerID="f17445543c4ae20340b00c834ecfa381b0d9624196a6da869ea823ee99df132f" Dec 11 08:17:57 crc kubenswrapper[4881]: I1211 08:17:57.675844 4881 scope.go:117] "RemoveContainer" containerID="472f02e542c67bbd11145db9b59f2bae1dc688d45e95099b17a33fa1e27dbac8" Dec 11 08:17:57 crc kubenswrapper[4881]: E1211 08:17:57.676057 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-g8jhd_openshift-multus(368e635e-0e63-4202-b9e4-4a3a85c6f30c)\"" pod="openshift-multus/multus-g8jhd" podUID="368e635e-0e63-4202-b9e4-4a3a85c6f30c" Dec 11 08:17:57 crc kubenswrapper[4881]: I1211 08:17:57.699183 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9l5jh" podStartSLOduration=97.699148609 podStartE2EDuration="1m37.699148609s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:17:57.697839907 +0000 UTC m=+126.075208644" 
watchObservedRunningTime="2025-12-11 08:17:57.699148609 +0000 UTC m=+126.076517346" Dec 11 08:17:58 crc kubenswrapper[4881]: I1211 08:17:58.005132 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:17:58 crc kubenswrapper[4881]: E1211 08:17:58.005687 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:17:58 crc kubenswrapper[4881]: E1211 08:17:58.116718 4881 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 11 08:17:58 crc kubenswrapper[4881]: I1211 08:17:58.680060 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g8jhd_368e635e-0e63-4202-b9e4-4a3a85c6f30c/kube-multus/1.log" Dec 11 08:17:59 crc kubenswrapper[4881]: I1211 08:17:59.004564 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:17:59 crc kubenswrapper[4881]: I1211 08:17:59.004638 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:17:59 crc kubenswrapper[4881]: E1211 08:17:59.004724 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:17:59 crc kubenswrapper[4881]: E1211 08:17:59.004815 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:17:59 crc kubenswrapper[4881]: I1211 08:17:59.005179 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:17:59 crc kubenswrapper[4881]: E1211 08:17:59.005542 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:18:00 crc kubenswrapper[4881]: I1211 08:18:00.004825 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:18:00 crc kubenswrapper[4881]: E1211 08:18:00.005006 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:18:00 crc kubenswrapper[4881]: I1211 08:18:00.006199 4881 scope.go:117] "RemoveContainer" containerID="4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b" Dec 11 08:18:00 crc kubenswrapper[4881]: E1211 08:18:00.006490 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-wf8q8_openshift-ovn-kubernetes(f14cc110-e74f-4cb7-a998-041e3f9b537b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" Dec 11 08:18:01 crc kubenswrapper[4881]: I1211 08:18:01.004437 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:18:01 crc kubenswrapper[4881]: I1211 08:18:01.004567 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:18:01 crc kubenswrapper[4881]: E1211 08:18:01.004642 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:18:01 crc kubenswrapper[4881]: E1211 08:18:01.004716 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:18:01 crc kubenswrapper[4881]: I1211 08:18:01.004763 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:18:01 crc kubenswrapper[4881]: E1211 08:18:01.004819 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:18:02 crc kubenswrapper[4881]: I1211 08:18:02.004350 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:18:02 crc kubenswrapper[4881]: E1211 08:18:02.004707 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:18:03 crc kubenswrapper[4881]: I1211 08:18:03.004604 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:18:03 crc kubenswrapper[4881]: I1211 08:18:03.004668 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:18:03 crc kubenswrapper[4881]: I1211 08:18:03.006499 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:18:03 crc kubenswrapper[4881]: E1211 08:18:03.006479 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:18:03 crc kubenswrapper[4881]: E1211 08:18:03.006618 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:18:03 crc kubenswrapper[4881]: E1211 08:18:03.006757 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:18:03 crc kubenswrapper[4881]: E1211 08:18:03.117403 4881 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 11 08:18:04 crc kubenswrapper[4881]: I1211 08:18:04.004971 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:18:04 crc kubenswrapper[4881]: E1211 08:18:04.006007 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:18:05 crc kubenswrapper[4881]: I1211 08:18:05.005408 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:18:05 crc kubenswrapper[4881]: I1211 08:18:05.005440 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:18:05 crc kubenswrapper[4881]: I1211 08:18:05.005408 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:18:05 crc kubenswrapper[4881]: E1211 08:18:05.005535 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:18:05 crc kubenswrapper[4881]: E1211 08:18:05.005728 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:18:05 crc kubenswrapper[4881]: E1211 08:18:05.005982 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:18:06 crc kubenswrapper[4881]: I1211 08:18:06.004876 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:18:06 crc kubenswrapper[4881]: E1211 08:18:06.005028 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:18:07 crc kubenswrapper[4881]: I1211 08:18:07.005448 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:18:07 crc kubenswrapper[4881]: I1211 08:18:07.005500 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:18:07 crc kubenswrapper[4881]: I1211 08:18:07.005536 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:18:07 crc kubenswrapper[4881]: E1211 08:18:07.006705 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:18:07 crc kubenswrapper[4881]: E1211 08:18:07.006819 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:18:07 crc kubenswrapper[4881]: E1211 08:18:07.006894 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:18:08 crc kubenswrapper[4881]: I1211 08:18:08.004643 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:18:08 crc kubenswrapper[4881]: E1211 08:18:08.005059 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:18:08 crc kubenswrapper[4881]: E1211 08:18:08.118417 4881 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 11 08:18:09 crc kubenswrapper[4881]: I1211 08:18:09.005084 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:18:09 crc kubenswrapper[4881]: I1211 08:18:09.005221 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:18:09 crc kubenswrapper[4881]: E1211 08:18:09.005283 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:18:09 crc kubenswrapper[4881]: E1211 08:18:09.005436 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:18:09 crc kubenswrapper[4881]: I1211 08:18:09.005621 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:18:09 crc kubenswrapper[4881]: E1211 08:18:09.005755 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:18:10 crc kubenswrapper[4881]: I1211 08:18:10.004717 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:18:10 crc kubenswrapper[4881]: E1211 08:18:10.004850 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:18:10 crc kubenswrapper[4881]: I1211 08:18:10.005057 4881 scope.go:117] "RemoveContainer" containerID="472f02e542c67bbd11145db9b59f2bae1dc688d45e95099b17a33fa1e27dbac8" Dec 11 08:18:10 crc kubenswrapper[4881]: I1211 08:18:10.723102 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g8jhd_368e635e-0e63-4202-b9e4-4a3a85c6f30c/kube-multus/1.log" Dec 11 08:18:10 crc kubenswrapper[4881]: I1211 08:18:10.723156 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-g8jhd" event={"ID":"368e635e-0e63-4202-b9e4-4a3a85c6f30c","Type":"ContainerStarted","Data":"021945edb3416828d6a387f2de7474bbde198cdef1eb1a9aea5de0cd3699a72a"} Dec 11 08:18:11 crc kubenswrapper[4881]: I1211 08:18:11.004647 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:18:11 crc kubenswrapper[4881]: I1211 08:18:11.004702 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:18:11 crc kubenswrapper[4881]: I1211 08:18:11.004721 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:18:11 crc kubenswrapper[4881]: E1211 08:18:11.004887 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:18:11 crc kubenswrapper[4881]: E1211 08:18:11.005020 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:18:11 crc kubenswrapper[4881]: E1211 08:18:11.005068 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:18:12 crc kubenswrapper[4881]: I1211 08:18:12.005077 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:18:12 crc kubenswrapper[4881]: E1211 08:18:12.005289 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:18:13 crc kubenswrapper[4881]: I1211 08:18:13.004452 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:18:13 crc kubenswrapper[4881]: I1211 08:18:13.004458 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:18:13 crc kubenswrapper[4881]: I1211 08:18:13.004505 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:18:13 crc kubenswrapper[4881]: E1211 08:18:13.005620 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:18:13 crc kubenswrapper[4881]: E1211 08:18:13.005692 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:18:13 crc kubenswrapper[4881]: E1211 08:18:13.005767 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:18:13 crc kubenswrapper[4881]: E1211 08:18:13.118989 4881 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 11 08:18:14 crc kubenswrapper[4881]: I1211 08:18:14.004982 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:18:14 crc kubenswrapper[4881]: E1211 08:18:14.005226 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:18:15 crc kubenswrapper[4881]: I1211 08:18:15.004677 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:18:15 crc kubenswrapper[4881]: E1211 08:18:15.004886 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:18:15 crc kubenswrapper[4881]: I1211 08:18:15.006430 4881 scope.go:117] "RemoveContainer" containerID="4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b" Dec 11 08:18:15 crc kubenswrapper[4881]: I1211 08:18:15.006779 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:18:15 crc kubenswrapper[4881]: E1211 08:18:15.006900 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:18:15 crc kubenswrapper[4881]: I1211 08:18:15.007076 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:18:15 crc kubenswrapper[4881]: E1211 08:18:15.007122 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:18:15 crc kubenswrapper[4881]: I1211 08:18:15.743848 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovnkube-controller/3.log" Dec 11 08:18:15 crc kubenswrapper[4881]: I1211 08:18:15.746764 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerStarted","Data":"948f85b80bf82b9419418419a5b8d071a585911d9e4ac3cd9122fe83ee5836e9"} Dec 11 08:18:15 crc kubenswrapper[4881]: I1211 08:18:15.747717 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:18:15 crc kubenswrapper[4881]: I1211 08:18:15.781483 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podStartSLOduration=115.781457249 podStartE2EDuration="1m55.781457249s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:15.779107611 +0000 UTC m=+144.156476348" watchObservedRunningTime="2025-12-11 08:18:15.781457249 +0000 UTC m=+144.158825986" Dec 11 08:18:16 crc kubenswrapper[4881]: I1211 08:18:16.004886 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:18:16 crc kubenswrapper[4881]: E1211 08:18:16.005103 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:18:16 crc kubenswrapper[4881]: I1211 08:18:16.679542 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-bzslm"] Dec 11 08:18:16 crc kubenswrapper[4881]: I1211 08:18:16.679694 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:18:16 crc kubenswrapper[4881]: E1211 08:18:16.679850 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:18:17 crc kubenswrapper[4881]: I1211 08:18:17.004945 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:18:17 crc kubenswrapper[4881]: I1211 08:18:17.005015 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:18:17 crc kubenswrapper[4881]: E1211 08:18:17.005187 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:18:17 crc kubenswrapper[4881]: E1211 08:18:17.005319 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:18:18 crc kubenswrapper[4881]: I1211 08:18:18.004386 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:18:18 crc kubenswrapper[4881]: E1211 08:18:18.004535 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:18:18 crc kubenswrapper[4881]: E1211 08:18:18.121495 4881 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 11 08:18:19 crc kubenswrapper[4881]: I1211 08:18:19.005384 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:18:19 crc kubenswrapper[4881]: I1211 08:18:19.005458 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:18:19 crc kubenswrapper[4881]: I1211 08:18:19.005405 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:18:19 crc kubenswrapper[4881]: E1211 08:18:19.005578 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:18:19 crc kubenswrapper[4881]: E1211 08:18:19.005804 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:18:19 crc kubenswrapper[4881]: E1211 08:18:19.005874 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:18:20 crc kubenswrapper[4881]: I1211 08:18:20.004324 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:18:20 crc kubenswrapper[4881]: E1211 08:18:20.004690 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:18:21 crc kubenswrapper[4881]: I1211 08:18:21.004619 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:18:21 crc kubenswrapper[4881]: I1211 08:18:21.004633 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:18:21 crc kubenswrapper[4881]: E1211 08:18:21.005123 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:18:21 crc kubenswrapper[4881]: I1211 08:18:21.004710 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:18:21 crc kubenswrapper[4881]: E1211 08:18:21.005190 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:18:21 crc kubenswrapper[4881]: E1211 08:18:21.005392 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:18:22 crc kubenswrapper[4881]: I1211 08:18:22.005481 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:18:22 crc kubenswrapper[4881]: E1211 08:18:22.005935 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 08:18:23 crc kubenswrapper[4881]: I1211 08:18:23.004978 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:18:23 crc kubenswrapper[4881]: I1211 08:18:23.005121 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:18:23 crc kubenswrapper[4881]: I1211 08:18:23.006978 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:18:23 crc kubenswrapper[4881]: E1211 08:18:23.006966 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bzslm" podUID="3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb" Dec 11 08:18:23 crc kubenswrapper[4881]: E1211 08:18:23.007128 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:18:23 crc kubenswrapper[4881]: E1211 08:18:23.007244 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 08:18:24 crc kubenswrapper[4881]: I1211 08:18:24.005125 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:18:24 crc kubenswrapper[4881]: I1211 08:18:24.008516 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 11 08:18:24 crc kubenswrapper[4881]: I1211 08:18:24.008694 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Dec 11 08:18:24 crc kubenswrapper[4881]: I1211 08:18:24.104005 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:18:24 crc kubenswrapper[4881]: I1211 08:18:24.886059 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:24 crc kubenswrapper[4881]: E1211 08:18:24.886426 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:20:26.886308289 +0000 UTC m=+275.263677026 (durationBeforeRetry 2m2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:24 crc kubenswrapper[4881]: I1211 08:18:24.987349 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:18:24 crc kubenswrapper[4881]: I1211 08:18:24.987412 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:18:24 crc kubenswrapper[4881]: I1211 08:18:24.987477 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:18:24 crc kubenswrapper[4881]: I1211 08:18:24.987512 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: 
\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:18:24 crc kubenswrapper[4881]: E1211 08:18:24.987611 4881 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 08:18:24 crc kubenswrapper[4881]: E1211 08:18:24.987623 4881 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 08:18:24 crc kubenswrapper[4881]: E1211 08:18:24.987677 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:20:26.987657919 +0000 UTC m=+275.365026616 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 08:18:24 crc kubenswrapper[4881]: E1211 08:18:24.987755 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:20:26.987716341 +0000 UTC m=+275.365085078 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 08:18:24 crc kubenswrapper[4881]: I1211 08:18:24.997200 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:18:24 crc kubenswrapper[4881]: I1211 08:18:24.997258 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:18:25 crc kubenswrapper[4881]: I1211 08:18:25.004864 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:18:25 crc kubenswrapper[4881]: I1211 08:18:25.004892 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:18:25 crc kubenswrapper[4881]: I1211 08:18:25.004882 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:18:25 crc kubenswrapper[4881]: I1211 08:18:25.008058 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Dec 11 08:18:25 crc kubenswrapper[4881]: I1211 08:18:25.008196 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Dec 11 08:18:25 crc kubenswrapper[4881]: I1211 08:18:25.008649 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Dec 11 08:18:25 crc kubenswrapper[4881]: I1211 08:18:25.008886 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Dec 11 08:18:25 crc kubenswrapper[4881]: I1211 08:18:25.060040 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:18:25 crc kubenswrapper[4881]: I1211 08:18:25.230227 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 08:18:25 crc kubenswrapper[4881]: W1211 08:18:25.284466 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-293542bed097ee0aaa8c72dc9acd7894a2b4a48fc1a95a83500cfd1030cd305c WatchSource:0}: Error finding container 293542bed097ee0aaa8c72dc9acd7894a2b4a48fc1a95a83500cfd1030cd305c: Status 404 returned error can't find the container with id 293542bed097ee0aaa8c72dc9acd7894a2b4a48fc1a95a83500cfd1030cd305c Dec 11 08:18:25 crc kubenswrapper[4881]: W1211 08:18:25.400714 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-ced23a90cc5d2383f28127ce04ada06a4ac83824d2788cca5cc1621ea47cec70 WatchSource:0}: Error finding container ced23a90cc5d2383f28127ce04ada06a4ac83824d2788cca5cc1621ea47cec70: Status 404 returned error can't find the container with id ced23a90cc5d2383f28127ce04ada06a4ac83824d2788cca5cc1621ea47cec70 Dec 11 08:18:25 crc kubenswrapper[4881]: I1211 08:18:25.783191 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"ced23a90cc5d2383f28127ce04ada06a4ac83824d2788cca5cc1621ea47cec70"} Dec 11 08:18:25 crc kubenswrapper[4881]: I1211 08:18:25.784613 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"293542bed097ee0aaa8c72dc9acd7894a2b4a48fc1a95a83500cfd1030cd305c"} Dec 11 08:18:26 crc kubenswrapper[4881]: I1211 08:18:26.790774 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"9b31ab6814da3459ebab70b1fd7a5a1b95f155a7016efdaab076d4de0c83625d"} Dec 11 08:18:26 crc kubenswrapper[4881]: I1211 08:18:26.793232 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" 
event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"5d038181b166e08c3b4ada6225a111ef148131fcdab529e0e4222b481b9ef8ff"} Dec 11 08:18:26 crc kubenswrapper[4881]: I1211 08:18:26.793373 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.126172 4881 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.170391 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-jwrst"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.170997 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.175391 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-vk42n"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.175918 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.176991 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.177121 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.177220 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.177270 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.177406 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.177614 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.177763 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.178120 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.178147 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.178723 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.183888 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.184174 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.184319 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Dec 11 08:18:27 crc kubenswrapper[4881]: W1211 08:18:27.184615 4881 reflector.go:561] object-"openshift-image-registry"/"image-registry-operator-tls": failed to list *v1.Secret: secrets "image-registry-operator-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-image-registry": no relationship found between node 'crc' and this object Dec 11 08:18:27 crc kubenswrapper[4881]: E1211 08:18:27.184660 4881 reflector.go:158] "Unhandled Error" err="object-\"openshift-image-registry\"/\"image-registry-operator-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"image-registry-operator-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-image-registry\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.184739 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Dec 11 08:18:27 crc kubenswrapper[4881]: W1211 08:18:27.184765 4881 reflector.go:561] object-"openshift-image-registry"/"trusted-ca": failed to list *v1.ConfigMap: configmaps "trusted-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-image-registry": no relationship found between node 'crc' and this object Dec 11 08:18:27 crc kubenswrapper[4881]: E1211 08:18:27.184812 4881 reflector.go:158] "Unhandled Error" err="object-\"openshift-image-registry\"/\"trusted-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"trusted-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-image-registry\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 11 08:18:27 crc kubenswrapper[4881]: W1211 08:18:27.184899 4881 reflector.go:561] object-"openshift-route-controller-manager"/"client-ca": failed to list *v1.ConfigMap: configmaps "client-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-route-controller-manager": no relationship found between node 'crc' and this object Dec 11 08:18:27 crc kubenswrapper[4881]: E1211 08:18:27.184916 4881 reflector.go:158] "Unhandled Error" err="object-\"openshift-route-controller-manager\"/\"client-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"client-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-route-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 11 08:18:27 crc kubenswrapper[4881]: W1211 08:18:27.184956 4881 reflector.go:561] 
object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2": failed to list *v1.Secret: secrets "route-controller-manager-sa-dockercfg-h2zr2" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-route-controller-manager": no relationship found between node 'crc' and this object Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.184973 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: E1211 08:18:27.184970 4881 reflector.go:158] "Unhandled Error" err="object-\"openshift-route-controller-manager\"/\"route-controller-manager-sa-dockercfg-h2zr2\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"route-controller-manager-sa-dockercfg-h2zr2\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-route-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.185021 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.185071 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Dec 11 08:18:27 crc kubenswrapper[4881]: W1211 08:18:27.185216 4881 reflector.go:561] object-"openshift-route-controller-manager"/"serving-cert": failed to list *v1.Secret: secrets "serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-route-controller-manager": no relationship found between node 'crc' and this object Dec 11 08:18:27 crc kubenswrapper[4881]: E1211 08:18:27.185234 4881 reflector.go:158] "Unhandled Error" err="object-\"openshift-route-controller-manager\"/\"serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-route-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.188180 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.188245 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.188429 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.188449 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Dec 11 08:18:27 crc kubenswrapper[4881]: W1211 08:18:27.188487 4881 reflector.go:561] object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx": failed to list *v1.Secret: secrets "cluster-image-registry-operator-dockercfg-m4qtx" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-image-registry": no relationship found between node 'crc' and this object Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.188544 
4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: E1211 08:18:27.188565 4881 reflector.go:158] "Unhandled Error" err="object-\"openshift-image-registry\"/\"cluster-image-registry-operator-dockercfg-m4qtx\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"cluster-image-registry-operator-dockercfg-m4qtx\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-image-registry\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.188654 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.192229 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-z9p6z"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.192712 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.193866 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-d6tb5"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.194311 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.198305 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-tzs74"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.198827 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.198839 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-tzs74" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.199032 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.199486 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-79b8l"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.199883 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.207589 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zl8wv"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.207690 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.207776 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.207779 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.207776 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.207911 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.207936 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208084 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208122 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208243 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208351 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208368 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208388 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hnk5n"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208513 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208313 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208651 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4wtd\" (UniqueName: \"kubernetes.io/projected/70bb46be-cfc1-411d-9a3b-55e040e1c2c5-kube-api-access-b4wtd\") pod \"machine-api-operator-5694c8668f-vk42n\" (UID: \"70bb46be-cfc1-411d-9a3b-55e040e1c2c5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208696 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/317a7f01-2747-4994-8000-54613f522149-serving-cert\") pod \"route-controller-manager-6576b87f9c-882gs\" (UID: \"317a7f01-2747-4994-8000-54613f522149\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208733 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/1b90c971-c734-4fa4-a385-0cce5ecacbd1-node-pullsecrets\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208773 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/cb02b71d-7677-477a-8068-c687ebb146ee-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-r546r\" (UID: \"cb02b71d-7677-477a-8068-c687ebb146ee\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208808 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hnk5n" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208813 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-serving-cert\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208837 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-service-ca\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208858 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1b90c971-c734-4fa4-a385-0cce5ecacbd1-encryption-config\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208874 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70bb46be-cfc1-411d-9a3b-55e040e1c2c5-config\") pod \"machine-api-operator-5694c8668f-vk42n\" (UID: \"70bb46be-cfc1-411d-9a3b-55e040e1c2c5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208892 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbplp\" (UniqueName: \"kubernetes.io/projected/fbc88e77-4757-426f-9212-8e4c3d26b8e0-kube-api-access-xbplp\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208908 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-trusted-ca-bundle\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208928 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fbc88e77-4757-426f-9212-8e4c3d26b8e0-audit-dir\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208947 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b90c971-c734-4fa4-a385-0cce5ecacbd1-config\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208962 4881 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1b90c971-c734-4fa4-a385-0cce5ecacbd1-audit\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208977 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0b6b1eb2-a5dd-4a95-956f-a787d03f453b-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-z9p6z\" (UID: \"0b6b1eb2-a5dd-4a95-956f-a787d03f453b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209002 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209022 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-audit-policies\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209038 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-oauth-serving-cert\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209058 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hw55h\" (UniqueName: \"kubernetes.io/projected/cb02b71d-7677-477a-8068-c687ebb146ee-kube-api-access-hw55h\") pod \"cluster-image-registry-operator-dc59b4c8b-r546r\" (UID: \"cb02b71d-7677-477a-8068-c687ebb146ee\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209072 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209104 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fn24\" (UniqueName: \"kubernetes.io/projected/0b6b1eb2-a5dd-4a95-956f-a787d03f453b-kube-api-access-2fn24\") pod \"authentication-operator-69f744f599-z9p6z\" (UID: \"0b6b1eb2-a5dd-4a95-956f-a787d03f453b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209130 4881 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cb02b71d-7677-477a-8068-c687ebb146ee-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-r546r\" (UID: \"cb02b71d-7677-477a-8068-c687ebb146ee\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209148 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209166 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1b90c971-c734-4fa4-a385-0cce5ecacbd1-etcd-serving-ca\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209183 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cb02b71d-7677-477a-8068-c687ebb146ee-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-r546r\" (UID: \"cb02b71d-7677-477a-8068-c687ebb146ee\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209199 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnw6c\" (UniqueName: \"kubernetes.io/projected/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-kube-api-access-hnw6c\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209227 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209253 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209274 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" 
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209289 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/70bb46be-cfc1-411d-9a3b-55e040e1c2c5-images\") pod \"machine-api-operator-5694c8668f-vk42n\" (UID: \"70bb46be-cfc1-411d-9a3b-55e040e1c2c5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209303 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/317a7f01-2747-4994-8000-54613f522149-client-ca\") pod \"route-controller-manager-6576b87f9c-882gs\" (UID: \"317a7f01-2747-4994-8000-54613f522149\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.208929 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209347 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfkx2\" (UniqueName: \"kubernetes.io/projected/1b90c971-c734-4fa4-a385-0cce5ecacbd1-kube-api-access-zfkx2\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209161 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209619 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0b6b1eb2-a5dd-4a95-956f-a787d03f453b-service-ca-bundle\") pod \"authentication-operator-69f744f599-z9p6z\" (UID: \"0b6b1eb2-a5dd-4a95-956f-a787d03f453b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209641 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-config\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209692 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1b90c971-c734-4fa4-a385-0cce5ecacbd1-etcd-client\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209700 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209735 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1b90c971-c734-4fa4-a385-0cce5ecacbd1-trusted-ca-bundle\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 
11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209803 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209728 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209802 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209927 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.209967 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/317a7f01-2747-4994-8000-54613f522149-config\") pod \"route-controller-manager-6576b87f9c-882gs\" (UID: \"317a7f01-2747-4994-8000-54613f522149\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.210002 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1b90c971-c734-4fa4-a385-0cce5ecacbd1-image-import-ca\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.210023 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b6b1eb2-a5dd-4a95-956f-a787d03f453b-config\") pod \"authentication-operator-69f744f599-z9p6z\" (UID: \"0b6b1eb2-a5dd-4a95-956f-a787d03f453b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.210061 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b90c971-c734-4fa4-a385-0cce5ecacbd1-serving-cert\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.210086 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bxfc\" (UniqueName: \"kubernetes.io/projected/317a7f01-2747-4994-8000-54613f522149-kube-api-access-7bxfc\") pod \"route-controller-manager-6576b87f9c-882gs\" (UID: \"317a7f01-2747-4994-8000-54613f522149\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.210107 4881 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vzp7\" (UniqueName: \"kubernetes.io/projected/26aecb96-f0ab-48d9-977c-89c3a1cf06e7-kube-api-access-8vzp7\") pod \"downloads-7954f5f757-tzs74\" (UID: \"26aecb96-f0ab-48d9-977c-89c3a1cf06e7\") " pod="openshift-console/downloads-7954f5f757-tzs74" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.210130 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/1b90c971-c734-4fa4-a385-0cce5ecacbd1-audit-dir\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.210150 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b6b1eb2-a5dd-4a95-956f-a787d03f453b-serving-cert\") pod \"authentication-operator-69f744f599-z9p6z\" (UID: \"0b6b1eb2-a5dd-4a95-956f-a787d03f453b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.210172 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.210197 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.210217 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-oauth-config\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.210242 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.210267 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/70bb46be-cfc1-411d-9a3b-55e040e1c2c5-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-vk42n\" (UID: \"70bb46be-cfc1-411d-9a3b-55e040e1c2c5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.210324 4881 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.210570 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.212114 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zn9v8"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.212797 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.214006 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.230532 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.230986 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.231053 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.231386 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.231589 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.233405 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.250364 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.250790 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.250879 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.251287 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.252284 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.252309 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.252484 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.252617 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Dec 11 
08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.253060 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.253640 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4j7x"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.253949 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nhlpk"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.254418 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.254787 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.254426 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nhlpk" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.255921 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.256381 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.256582 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4j7x" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.256639 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.259030 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.259187 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.259406 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.259601 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-74tpl"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.260237 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-74tpl" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.261219 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.261881 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.265747 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-td2hv"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.266717 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-r75cd"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.266806 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.267144 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.267520 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwz8"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.268227 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.268823 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.269532 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwz8" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.271089 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.271689 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.271847 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.272612 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-5dbn8"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.273162 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-5dbn8" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.273510 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.274946 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.274996 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.275085 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.276305 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.276464 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.276600 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.276635 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.276907 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.277033 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.277254 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.278280 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.278533 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.279126 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.279225 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-qbkd8"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.279578 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.280651 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.280796 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.280988 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.281023 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.281184 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.291735 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.292402 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.294622 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.296281 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.296814 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.297319 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.297738 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.298161 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.298606 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.298876 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.299221 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.299465 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.299524 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Dec 11 08:18:27 crc 
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.301206 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330128 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330161 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-oauth-config\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330180 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330199 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/70bb46be-cfc1-411d-9a3b-55e040e1c2c5-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-vk42n\" (UID: \"70bb46be-cfc1-411d-9a3b-55e040e1c2c5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330213 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4wtd\" (UniqueName: \"kubernetes.io/projected/70bb46be-cfc1-411d-9a3b-55e040e1c2c5-kube-api-access-b4wtd\") pod \"machine-api-operator-5694c8668f-vk42n\" (UID: \"70bb46be-cfc1-411d-9a3b-55e040e1c2c5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330228 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/317a7f01-2747-4994-8000-54613f522149-serving-cert\") pod \"route-controller-manager-6576b87f9c-882gs\" (UID: \"317a7f01-2747-4994-8000-54613f522149\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330242 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/1b90c971-c734-4fa4-a385-0cce5ecacbd1-node-pullsecrets\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330257 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/cb02b71d-7677-477a-8068-c687ebb146ee-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-r546r\" (UID: \"cb02b71d-7677-477a-8068-c687ebb146ee\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330273 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-serving-cert\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330287 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-service-ca\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330301 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1b90c971-c734-4fa4-a385-0cce5ecacbd1-encryption-config\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330317 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70bb46be-cfc1-411d-9a3b-55e040e1c2c5-config\") pod \"machine-api-operator-5694c8668f-vk42n\" (UID: \"70bb46be-cfc1-411d-9a3b-55e040e1c2c5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330352 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbplp\" (UniqueName: \"kubernetes.io/projected/fbc88e77-4757-426f-9212-8e4c3d26b8e0-kube-api-access-xbplp\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330369 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-trusted-ca-bundle\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330388 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fbc88e77-4757-426f-9212-8e4c3d26b8e0-audit-dir\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330407 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b90c971-c734-4fa4-a385-0cce5ecacbd1-config\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330422 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1b90c971-c734-4fa4-a385-0cce5ecacbd1-audit\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330447 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0b6b1eb2-a5dd-4a95-956f-a787d03f453b-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-z9p6z\" (UID: \"0b6b1eb2-a5dd-4a95-956f-a787d03f453b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330459 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331014 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/1b90c971-c734-4fa4-a385-0cce5ecacbd1-node-pullsecrets\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.330463 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331122 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-audit-policies\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331148 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-oauth-serving-cert\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331176 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hw55h\" (UniqueName: \"kubernetes.io/projected/cb02b71d-7677-477a-8068-c687ebb146ee-kube-api-access-hw55h\") pod \"cluster-image-registry-operator-dc59b4c8b-r546r\" (UID: \"cb02b71d-7677-477a-8068-c687ebb146ee\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331199 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l"
Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331248 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fn24\" (UniqueName: \"kubernetes.io/projected/0b6b1eb2-a5dd-4a95-956f-a787d03f453b-kube-api-access-2fn24\") pod \"authentication-operator-69f744f599-z9p6z\" (UID: \"0b6b1eb2-a5dd-4a95-956f-a787d03f453b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z"
"operationExecutor.MountVolume started for volume \"kube-api-access-2fn24\" (UniqueName: \"kubernetes.io/projected/0b6b1eb2-a5dd-4a95-956f-a787d03f453b-kube-api-access-2fn24\") pod \"authentication-operator-69f744f599-z9p6z\" (UID: \"0b6b1eb2-a5dd-4a95-956f-a787d03f453b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331269 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cb02b71d-7677-477a-8068-c687ebb146ee-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-r546r\" (UID: \"cb02b71d-7677-477a-8068-c687ebb146ee\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331293 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331317 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1b90c971-c734-4fa4-a385-0cce5ecacbd1-etcd-serving-ca\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331359 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cb02b71d-7677-477a-8068-c687ebb146ee-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-r546r\" (UID: \"cb02b71d-7677-477a-8068-c687ebb146ee\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331383 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnw6c\" (UniqueName: \"kubernetes.io/projected/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-kube-api-access-hnw6c\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331408 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331430 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331465 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" 
(UniqueName: \"kubernetes.io/configmap/70bb46be-cfc1-411d-9a3b-55e040e1c2c5-images\") pod \"machine-api-operator-5694c8668f-vk42n\" (UID: \"70bb46be-cfc1-411d-9a3b-55e040e1c2c5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331488 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331513 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/317a7f01-2747-4994-8000-54613f522149-client-ca\") pod \"route-controller-manager-6576b87f9c-882gs\" (UID: \"317a7f01-2747-4994-8000-54613f522149\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331537 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-config\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331563 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfkx2\" (UniqueName: \"kubernetes.io/projected/1b90c971-c734-4fa4-a385-0cce5ecacbd1-kube-api-access-zfkx2\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331585 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0b6b1eb2-a5dd-4a95-956f-a787d03f453b-service-ca-bundle\") pod \"authentication-operator-69f744f599-z9p6z\" (UID: \"0b6b1eb2-a5dd-4a95-956f-a787d03f453b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331608 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1b90c971-c734-4fa4-a385-0cce5ecacbd1-etcd-client\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331631 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1b90c971-c734-4fa4-a385-0cce5ecacbd1-trusted-ca-bundle\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331654 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331680 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331713 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/317a7f01-2747-4994-8000-54613f522149-config\") pod \"route-controller-manager-6576b87f9c-882gs\" (UID: \"317a7f01-2747-4994-8000-54613f522149\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331735 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1b90c971-c734-4fa4-a385-0cce5ecacbd1-image-import-ca\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331756 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b6b1eb2-a5dd-4a95-956f-a787d03f453b-config\") pod \"authentication-operator-69f744f599-z9p6z\" (UID: \"0b6b1eb2-a5dd-4a95-956f-a787d03f453b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331779 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b90c971-c734-4fa4-a385-0cce5ecacbd1-serving-cert\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331805 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bxfc\" (UniqueName: \"kubernetes.io/projected/317a7f01-2747-4994-8000-54613f522149-kube-api-access-7bxfc\") pod \"route-controller-manager-6576b87f9c-882gs\" (UID: \"317a7f01-2747-4994-8000-54613f522149\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331830 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vzp7\" (UniqueName: \"kubernetes.io/projected/26aecb96-f0ab-48d9-977c-89c3a1cf06e7-kube-api-access-8vzp7\") pod \"downloads-7954f5f757-tzs74\" (UID: \"26aecb96-f0ab-48d9-977c-89c3a1cf06e7\") " pod="openshift-console/downloads-7954f5f757-tzs74" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331853 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/1b90c971-c734-4fa4-a385-0cce5ecacbd1-audit-dir\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331875 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/0b6b1eb2-a5dd-4a95-956f-a787d03f453b-serving-cert\") pod \"authentication-operator-69f744f599-z9p6z\" (UID: \"0b6b1eb2-a5dd-4a95-956f-a787d03f453b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.331900 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.332119 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-trusted-ca-bundle\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.332646 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.332725 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-service-ca\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.333260 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-kjhrd"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.333803 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.336929 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.337612 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.337825 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-kjhrd" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.341316 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-oauth-config\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.342751 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.345122 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rsvfs"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.345458 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b90c971-c734-4fa4-a385-0cce5ecacbd1-config\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.345534 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fbc88e77-4757-426f-9212-8e4c3d26b8e0-audit-dir\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.345558 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.345668 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-9jj8n"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.346455 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-audit-policies\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.346655 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.347686 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.348041 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tgcxv"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 
08:18:27.348609 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-w78d4"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.348891 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1b90c971-c734-4fa4-a385-0cce5ecacbd1-audit\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.349064 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-w78d4" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.349366 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rsvfs" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.349917 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.350437 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0b6b1eb2-a5dd-4a95-956f-a787d03f453b-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-z9p6z\" (UID: \"0b6b1eb2-a5dd-4a95-956f-a787d03f453b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.351058 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70bb46be-cfc1-411d-9a3b-55e040e1c2c5-config\") pod \"machine-api-operator-5694c8668f-vk42n\" (UID: \"70bb46be-cfc1-411d-9a3b-55e040e1c2c5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.352399 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.352935 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.355825 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-oauth-serving-cert\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.357062 
4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.357775 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.357904 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-config\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.357941 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.358413 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.358580 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0b6b1eb2-a5dd-4a95-956f-a787d03f453b-service-ca-bundle\") pod \"authentication-operator-69f744f599-z9p6z\" (UID: \"0b6b1eb2-a5dd-4a95-956f-a787d03f453b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.358891 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.359045 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1b90c971-c734-4fa4-a385-0cce5ecacbd1-trusted-ca-bundle\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.359618 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1b90c971-c734-4fa4-a385-0cce5ecacbd1-image-import-ca\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.359641 4881 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8p5cn"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.361688 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.360069 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b6b1eb2-a5dd-4a95-956f-a787d03f453b-config\") pod \"authentication-operator-69f744f599-z9p6z\" (UID: \"0b6b1eb2-a5dd-4a95-956f-a787d03f453b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.359902 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/1b90c971-c734-4fa4-a385-0cce5ecacbd1-audit-dir\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.362173 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.362239 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/70bb46be-cfc1-411d-9a3b-55e040e1c2c5-images\") pod \"machine-api-operator-5694c8668f-vk42n\" (UID: \"70bb46be-cfc1-411d-9a3b-55e040e1c2c5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.362570 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1b90c971-c734-4fa4-a385-0cce5ecacbd1-etcd-serving-ca\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.362895 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.363099 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1b90c971-c734-4fa4-a385-0cce5ecacbd1-etcd-client\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.364486 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/317a7f01-2747-4994-8000-54613f522149-config\") pod \"route-controller-manager-6576b87f9c-882gs\" (UID: \"317a7f01-2747-4994-8000-54613f522149\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.364883 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-vk42n"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.365473 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.367673 4881 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/70bb46be-cfc1-411d-9a3b-55e040e1c2c5-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-vk42n\" (UID: \"70bb46be-cfc1-411d-9a3b-55e040e1c2c5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.367733 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-serving-cert\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.367788 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2n9k5"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.368013 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b6b1eb2-a5dd-4a95-956f-a787d03f453b-serving-cert\") pod \"authentication-operator-69f744f599-z9p6z\" (UID: \"0b6b1eb2-a5dd-4a95-956f-a787d03f453b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.368979 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-jwrst"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.369062 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-dw769"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.370155 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.371058 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.371488 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vbpk5"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.371763 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1b90c971-c734-4fa4-a385-0cce5ecacbd1-encryption-config\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.372045 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-9jj8n" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.372116 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.372831 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tgcxv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.373028 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8p5cn" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.373059 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.373249 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.373316 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.373490 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-dw769" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.373551 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.375910 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.376378 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b90c971-c734-4fa4-a385-0cce5ecacbd1-serving-cert\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.384670 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-z9p6z"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.384794 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vbpk5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.390411 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zl8wv"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.394236 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.401991 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-tzs74"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.404558 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.405259 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-d6tb5"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.406710 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zn9v8"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.407659 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nhlpk"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.408813 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4j7x"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.409878 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hnk5n"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.411025 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-79b8l"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.412378 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.412483 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-w78d4"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.413449 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.414791 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-kjhrd"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.416426 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.417358 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.418459 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tgcxv"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.419667 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-74tpl"] Dec 11 08:18:27 crc 
kubenswrapper[4881]: I1211 08:18:27.423241 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.428964 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2n9k5"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.434760 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.434803 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-kjptn"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.435400 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-kjptn" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.435499 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-nh6jb"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438135 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lxwz8\" (UID: \"43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwz8" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438176 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qsgc\" (UniqueName: \"kubernetes.io/projected/b6c9051a-b8c5-4127-9665-b07e92e60bab-kube-api-access-6qsgc\") pod \"machine-config-controller-84d6567774-7l2x6\" (UID: \"b6c9051a-b8c5-4127-9665-b07e92e60bab\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438192 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/79e6349a-afe4-412b-8d64-f0875a38ccf2-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438207 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4td5\" (UniqueName: \"kubernetes.io/projected/79e6349a-afe4-412b-8d64-f0875a38ccf2-kube-api-access-w4td5\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438227 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/79e6349a-afe4-412b-8d64-f0875a38ccf2-audit-dir\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438249 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/79e6349a-afe4-412b-8d64-f0875a38ccf2-serving-cert\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438273 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lxwz8\" (UID: \"43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwz8" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438288 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lxwz8\" (UID: \"43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwz8" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438310 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/8ecef833-b914-464d-a395-49bb7f66a180-available-featuregates\") pod \"openshift-config-operator-7777fb866f-td2hv\" (UID: \"8ecef833-b914-464d-a395-49bb7f66a180\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438344 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/79e6349a-afe4-412b-8d64-f0875a38ccf2-etcd-client\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438438 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b6c9051a-b8c5-4127-9665-b07e92e60bab-proxy-tls\") pod \"machine-config-controller-84d6567774-7l2x6\" (UID: \"b6c9051a-b8c5-4127-9665-b07e92e60bab\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438460 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/79e6349a-afe4-412b-8d64-f0875a38ccf2-encryption-config\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438483 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/79e6349a-afe4-412b-8d64-f0875a38ccf2-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438498 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-config\") pod \"controller-manager-879f6c89f-zl8wv\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438514 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-client-ca\") pod \"controller-manager-879f6c89f-zl8wv\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438707 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjg5j\" (UniqueName: \"kubernetes.io/projected/69071241-3547-43b1-bf14-5bb03184a08a-kube-api-access-rjg5j\") pod \"controller-manager-879f6c89f-zl8wv\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438767 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zl8wv\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438816 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69071241-3547-43b1-bf14-5bb03184a08a-serving-cert\") pod \"controller-manager-879f6c89f-zl8wv\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438837 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b6c9051a-b8c5-4127-9665-b07e92e60bab-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-7l2x6\" (UID: \"b6c9051a-b8c5-4127-9665-b07e92e60bab\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438874 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/79e6349a-afe4-412b-8d64-f0875a38ccf2-audit-policies\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438912 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8ecef833-b914-464d-a395-49bb7f66a180-serving-cert\") pod \"openshift-config-operator-7777fb866f-td2hv\" (UID: \"8ecef833-b914-464d-a395-49bb7f66a180\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.438929 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2l8nv\" (UniqueName: 
\"kubernetes.io/projected/8ecef833-b914-464d-a395-49bb7f66a180-kube-api-access-2l8nv\") pod \"openshift-config-operator-7777fb866f-td2hv\" (UID: \"8ecef833-b914-464d-a395-49bb7f66a180\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.439013 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-f5gdp"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.439270 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.439732 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.439841 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-f5gdp" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.440280 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-dw769"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.442479 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-r75cd"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.444002 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-9jj8n"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.445462 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-td2hv"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.446796 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.448276 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.449634 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwz8"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.450905 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.452317 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-nh6jb"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.452786 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.454175 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-f5gdp"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.455683 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.456694 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rsvfs"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.458282 4881 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8p5cn"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.459646 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-5dbn8"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.461139 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-kjptn"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.462411 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vbpk5"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.463464 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-dcbjb"] Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.464166 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-dcbjb" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.472707 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.492691 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.512430 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.533445 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.539447 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/79e6349a-afe4-412b-8d64-f0875a38ccf2-audit-dir\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.539490 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79e6349a-afe4-412b-8d64-f0875a38ccf2-serving-cert\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.539522 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lxwz8\" (UID: \"43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwz8" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.539541 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lxwz8\" (UID: \"43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwz8" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.539566 4881 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/79e6349a-afe4-412b-8d64-f0875a38ccf2-audit-dir\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.539567 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/8ecef833-b914-464d-a395-49bb7f66a180-available-featuregates\") pod \"openshift-config-operator-7777fb866f-td2hv\" (UID: \"8ecef833-b914-464d-a395-49bb7f66a180\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.539615 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/79e6349a-afe4-412b-8d64-f0875a38ccf2-etcd-client\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.539674 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b6c9051a-b8c5-4127-9665-b07e92e60bab-proxy-tls\") pod \"machine-config-controller-84d6567774-7l2x6\" (UID: \"b6c9051a-b8c5-4127-9665-b07e92e60bab\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.539692 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/79e6349a-afe4-412b-8d64-f0875a38ccf2-encryption-config\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.539799 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/79e6349a-afe4-412b-8d64-f0875a38ccf2-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.539817 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-config\") pod \"controller-manager-879f6c89f-zl8wv\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.539832 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-client-ca\") pod \"controller-manager-879f6c89f-zl8wv\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.539857 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjg5j\" (UniqueName: \"kubernetes.io/projected/69071241-3547-43b1-bf14-5bb03184a08a-kube-api-access-rjg5j\") pod \"controller-manager-879f6c89f-zl8wv\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.539923 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/8ecef833-b914-464d-a395-49bb7f66a180-available-featuregates\") pod \"openshift-config-operator-7777fb866f-td2hv\" (UID: \"8ecef833-b914-464d-a395-49bb7f66a180\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.539987 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zl8wv\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.540009 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69071241-3547-43b1-bf14-5bb03184a08a-serving-cert\") pod \"controller-manager-879f6c89f-zl8wv\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.540028 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b6c9051a-b8c5-4127-9665-b07e92e60bab-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-7l2x6\" (UID: \"b6c9051a-b8c5-4127-9665-b07e92e60bab\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.540043 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/79e6349a-afe4-412b-8d64-f0875a38ccf2-audit-policies\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.540060 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8ecef833-b914-464d-a395-49bb7f66a180-serving-cert\") pod \"openshift-config-operator-7777fb866f-td2hv\" (UID: \"8ecef833-b914-464d-a395-49bb7f66a180\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.540075 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2l8nv\" (UniqueName: \"kubernetes.io/projected/8ecef833-b914-464d-a395-49bb7f66a180-kube-api-access-2l8nv\") pod \"openshift-config-operator-7777fb866f-td2hv\" (UID: \"8ecef833-b914-464d-a395-49bb7f66a180\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.540112 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lxwz8\" (UID: \"43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwz8" Dec 11 08:18:27 
crc kubenswrapper[4881]: I1211 08:18:27.540128 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qsgc\" (UniqueName: \"kubernetes.io/projected/b6c9051a-b8c5-4127-9665-b07e92e60bab-kube-api-access-6qsgc\") pod \"machine-config-controller-84d6567774-7l2x6\" (UID: \"b6c9051a-b8c5-4127-9665-b07e92e60bab\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.540142 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/79e6349a-afe4-412b-8d64-f0875a38ccf2-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.540157 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4td5\" (UniqueName: \"kubernetes.io/projected/79e6349a-afe4-412b-8d64-f0875a38ccf2-kube-api-access-w4td5\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.540465 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/79e6349a-afe4-412b-8d64-f0875a38ccf2-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.540642 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/79e6349a-afe4-412b-8d64-f0875a38ccf2-audit-policies\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.540802 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b6c9051a-b8c5-4127-9665-b07e92e60bab-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-7l2x6\" (UID: \"b6c9051a-b8c5-4127-9665-b07e92e60bab\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.541152 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/79e6349a-afe4-412b-8d64-f0875a38ccf2-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.541807 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-client-ca\") pod \"controller-manager-879f6c89f-zl8wv\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.543237 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-proxy-ca-bundles\") pod 
\"controller-manager-879f6c89f-zl8wv\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.543298 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-config\") pod \"controller-manager-879f6c89f-zl8wv\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.551852 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8ecef833-b914-464d-a395-49bb7f66a180-serving-cert\") pod \"openshift-config-operator-7777fb866f-td2hv\" (UID: \"8ecef833-b914-464d-a395-49bb7f66a180\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.551905 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79e6349a-afe4-412b-8d64-f0875a38ccf2-serving-cert\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.551951 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69071241-3547-43b1-bf14-5bb03184a08a-serving-cert\") pod \"controller-manager-879f6c89f-zl8wv\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.552030 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/79e6349a-afe4-412b-8d64-f0875a38ccf2-encryption-config\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.552329 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/79e6349a-afe4-412b-8d64-f0875a38ccf2-etcd-client\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.556213 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.573078 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.593673 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.602697 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lxwz8\" (UID: \"43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43\") " 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwz8" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.613818 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.620411 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lxwz8\" (UID: \"43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwz8" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.633087 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.652882 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.662456 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b6c9051a-b8c5-4127-9665-b07e92e60bab-proxy-tls\") pod \"machine-config-controller-84d6567774-7l2x6\" (UID: \"b6c9051a-b8c5-4127-9665-b07e92e60bab\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.673557 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.693449 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.713678 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.732730 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.759118 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.773253 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.799026 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.813287 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.833143 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.853222 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.875784 4881 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ingress"/"router-certs-default" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.893509 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.913204 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.933978 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.953096 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Dec 11 08:18:27 crc kubenswrapper[4881]: I1211 08:18:27.974473 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.024817 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.052739 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4wtd\" (UniqueName: \"kubernetes.io/projected/70bb46be-cfc1-411d-9a3b-55e040e1c2c5-kube-api-access-b4wtd\") pod \"machine-api-operator-5694c8668f-vk42n\" (UID: \"70bb46be-cfc1-411d-9a3b-55e040e1c2c5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.073857 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.093397 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.098494 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.113289 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.133507 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.154574 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.193199 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.197202 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.214457 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.234477 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.253988 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.296915 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cb02b71d-7677-477a-8068-c687ebb146ee-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-r546r\" (UID: \"cb02b71d-7677-477a-8068-c687ebb146ee\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.315724 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-vk42n"] Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.317451 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fn24\" (UniqueName: \"kubernetes.io/projected/0b6b1eb2-a5dd-4a95-956f-a787d03f453b-kube-api-access-2fn24\") pod \"authentication-operator-69f744f599-z9p6z\" (UID: \"0b6b1eb2-a5dd-4a95-956f-a787d03f453b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.327650 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hw55h\" (UniqueName: \"kubernetes.io/projected/cb02b71d-7677-477a-8068-c687ebb146ee-kube-api-access-hw55h\") pod \"cluster-image-registry-operator-dc59b4c8b-r546r\" (UID: \"cb02b71d-7677-477a-8068-c687ebb146ee\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r" Dec 11 08:18:28 crc kubenswrapper[4881]: E1211 08:18:28.331446 4881 secret.go:188] Couldn't get secret openshift-route-controller-manager/serving-cert: failed to sync secret cache: timed out waiting for the condition Dec 11 08:18:28 crc kubenswrapper[4881]: E1211 08:18:28.331540 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/317a7f01-2747-4994-8000-54613f522149-serving-cert podName:317a7f01-2747-4994-8000-54613f522149 nodeName:}" 
failed. No retries permitted until 2025-12-11 08:18:28.831521104 +0000 UTC m=+157.208889811 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/317a7f01-2747-4994-8000-54613f522149-serving-cert") pod "route-controller-manager-6576b87f9c-882gs" (UID: "317a7f01-2747-4994-8000-54613f522149") : failed to sync secret cache: timed out waiting for the condition Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.333570 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Dec 11 08:18:28 crc kubenswrapper[4881]: E1211 08:18:28.336841 4881 secret.go:188] Couldn't get secret openshift-image-registry/image-registry-operator-tls: failed to sync secret cache: timed out waiting for the condition Dec 11 08:18:28 crc kubenswrapper[4881]: E1211 08:18:28.336971 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb02b71d-7677-477a-8068-c687ebb146ee-image-registry-operator-tls podName:cb02b71d-7677-477a-8068-c687ebb146ee nodeName:}" failed. No retries permitted until 2025-12-11 08:18:28.836941597 +0000 UTC m=+157.214310324 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/cb02b71d-7677-477a-8068-c687ebb146ee-image-registry-operator-tls") pod "cluster-image-registry-operator-dc59b4c8b-r546r" (UID: "cb02b71d-7677-477a-8068-c687ebb146ee") : failed to sync secret cache: timed out waiting for the condition Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.351882 4881 request.go:700] Waited for 1.002317538s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager-operator/configmaps?fieldSelector=metadata.name%3Dkube-controller-manager-operator-config&limit=500&resourceVersion=0 Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.354208 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Dec 11 08:18:28 crc kubenswrapper[4881]: E1211 08:18:28.357953 4881 configmap.go:193] Couldn't get configMap openshift-route-controller-manager/client-ca: failed to sync configmap cache: timed out waiting for the condition Dec 11 08:18:28 crc kubenswrapper[4881]: E1211 08:18:28.358084 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/317a7f01-2747-4994-8000-54613f522149-client-ca podName:317a7f01-2747-4994-8000-54613f522149 nodeName:}" failed. No retries permitted until 2025-12-11 08:18:28.858052728 +0000 UTC m=+157.235421465 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/317a7f01-2747-4994-8000-54613f522149-client-ca") pod "route-controller-manager-6576b87f9c-882gs" (UID: "317a7f01-2747-4994-8000-54613f522149") : failed to sync configmap cache: timed out waiting for the condition Dec 11 08:18:28 crc kubenswrapper[4881]: E1211 08:18:28.358364 4881 configmap.go:193] Couldn't get configMap openshift-image-registry/trusted-ca: failed to sync configmap cache: timed out waiting for the condition Dec 11 08:18:28 crc kubenswrapper[4881]: E1211 08:18:28.358444 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/cb02b71d-7677-477a-8068-c687ebb146ee-trusted-ca podName:cb02b71d-7677-477a-8068-c687ebb146ee nodeName:}" failed. No retries permitted until 2025-12-11 08:18:28.858421127 +0000 UTC m=+157.235789884 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/cb02b71d-7677-477a-8068-c687ebb146ee-trusted-ca") pod "cluster-image-registry-operator-dc59b4c8b-r546r" (UID: "cb02b71d-7677-477a-8068-c687ebb146ee") : failed to sync configmap cache: timed out waiting for the condition Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.395480 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnw6c\" (UniqueName: \"kubernetes.io/projected/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-kube-api-access-hnw6c\") pod \"console-f9d7485db-d6tb5\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.409953 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbplp\" (UniqueName: \"kubernetes.io/projected/fbc88e77-4757-426f-9212-8e4c3d26b8e0-kube-api-access-xbplp\") pod \"oauth-openshift-558db77b4-79b8l\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.414203 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.448973 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfkx2\" (UniqueName: \"kubernetes.io/projected/1b90c971-c734-4fa4-a385-0cce5ecacbd1-kube-api-access-zfkx2\") pod \"apiserver-76f77b778f-jwrst\" (UID: \"1b90c971-c734-4fa4-a385-0cce5ecacbd1\") " pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.453227 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.458636 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.474857 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.477773 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.494451 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.514173 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.533812 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.554119 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.605093 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vzp7\" (UniqueName: \"kubernetes.io/projected/26aecb96-f0ab-48d9-977c-89c3a1cf06e7-kube-api-access-8vzp7\") pod \"downloads-7954f5f757-tzs74\" (UID: \"26aecb96-f0ab-48d9-977c-89c3a1cf06e7\") " pod="openshift-console/downloads-7954f5f757-tzs74" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.608862 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.609541 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bxfc\" (UniqueName: \"kubernetes.io/projected/317a7f01-2747-4994-8000-54613f522149-kube-api-access-7bxfc\") pod \"route-controller-manager-6576b87f9c-882gs\" (UID: \"317a7f01-2747-4994-8000-54613f522149\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.612935 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.633886 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.654862 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.674768 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.686066 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-d6tb5"] Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.689731 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.693323 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-z9p6z"] Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.693566 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Dec 11 08:18:28 crc kubenswrapper[4881]: W1211 08:18:28.696903 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod09b82983_c4d5_4c1f_8f41_9dcc20fbfd03.slice/crio-c104383ea2dc8dfe0e6f6537c3b72d2425698a783fb63d849e02c93d133303ca WatchSource:0}: Error finding container c104383ea2dc8dfe0e6f6537c3b72d2425698a783fb63d849e02c93d133303ca: Status 404 returned error can't find the container with id c104383ea2dc8dfe0e6f6537c3b72d2425698a783fb63d849e02c93d133303ca Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.715141 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.734048 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.754000 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.773869 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.806483 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.808055 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n" event={"ID":"70bb46be-cfc1-411d-9a3b-55e040e1c2c5","Type":"ContainerStarted","Data":"dd4dfa7864f16587d479e3a67b2fd4bed704ac071a685c52719c0e5ec5050523"} Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.808134 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n" event={"ID":"70bb46be-cfc1-411d-9a3b-55e040e1c2c5","Type":"ContainerStarted","Data":"10dfaef3140d2a35a333b1c3262054531bc304c75d3322a57232506913a0caac"} Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.808152 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n" event={"ID":"70bb46be-cfc1-411d-9a3b-55e040e1c2c5","Type":"ContainerStarted","Data":"7eb6d56733d37afe904facd13738fca42cba1cf53bd0f334fdf2ed6b1a1e2ac2"} Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.812599 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-d6tb5" event={"ID":"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03","Type":"ContainerStarted","Data":"c104383ea2dc8dfe0e6f6537c3b72d2425698a783fb63d849e02c93d133303ca"} Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.813627 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" event={"ID":"0b6b1eb2-a5dd-4a95-956f-a787d03f453b","Type":"ContainerStarted","Data":"efd46a88109657b3abc11366dd02b19a62b201cba3f18d0bbb893641f7a9c3ce"} Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.816151 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.818681 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-79b8l"] Dec 11 08:18:28 crc kubenswrapper[4881]: W1211 08:18:28.826458 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfbc88e77_4757_426f_9212_8e4c3d26b8e0.slice/crio-a0098c4caccf1456e8cec9078f0f3ac67e557b45383e99b4108ae068dc4f527c WatchSource:0}: Error finding container a0098c4caccf1456e8cec9078f0f3ac67e557b45383e99b4108ae068dc4f527c: Status 404 returned error can't find the container with id a0098c4caccf1456e8cec9078f0f3ac67e557b45383e99b4108ae068dc4f527c Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.833457 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.836489 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-tzs74" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.853149 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.855680 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/317a7f01-2747-4994-8000-54613f522149-serving-cert\") pod \"route-controller-manager-6576b87f9c-882gs\" (UID: \"317a7f01-2747-4994-8000-54613f522149\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.855736 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/cb02b71d-7677-477a-8068-c687ebb146ee-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-r546r\" (UID: \"cb02b71d-7677-477a-8068-c687ebb146ee\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.873253 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.885282 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-jwrst"] Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.904073 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.917152 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.934219 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Dec 11 08:18:28 
crc kubenswrapper[4881]: I1211 08:18:28.954858 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.960875 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cb02b71d-7677-477a-8068-c687ebb146ee-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-r546r\" (UID: \"cb02b71d-7677-477a-8068-c687ebb146ee\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.961000 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/317a7f01-2747-4994-8000-54613f522149-client-ca\") pod \"route-controller-manager-6576b87f9c-882gs\" (UID: \"317a7f01-2747-4994-8000-54613f522149\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.973367 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Dec 11 08:18:28 crc kubenswrapper[4881]: I1211 08:18:28.992711 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.013408 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.021701 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-tzs74"] Dec 11 08:18:29 crc kubenswrapper[4881]: W1211 08:18:29.027428 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26aecb96_f0ab_48d9_977c_89c3a1cf06e7.slice/crio-584761de0b268886d4cb851031a3932701123aec975843b160f7d593aaefb532 WatchSource:0}: Error finding container 584761de0b268886d4cb851031a3932701123aec975843b160f7d593aaefb532: Status 404 returned error can't find the container with id 584761de0b268886d4cb851031a3932701123aec975843b160f7d593aaefb532 Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.033145 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.053856 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.074814 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.093010 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.113652 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.168509 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.168581 4881 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.174771 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.194547 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.214222 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.234999 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.253604 4881 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.273797 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.294215 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.314599 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.333070 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.353999 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.371324 4881 request.go:700] Waited for 1.906944205s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/secrets?fieldSelector=metadata.name%3Dnode-bootstrapper-token&limit=500&resourceVersion=0 Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.374562 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.393228 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.397236 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.397383 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.414247 4881 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-config-operator"/"machine-config-server-tls" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.451529 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjg5j\" (UniqueName: \"kubernetes.io/projected/69071241-3547-43b1-bf14-5bb03184a08a-kube-api-access-rjg5j\") pod \"controller-manager-879f6c89f-zl8wv\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.484463 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4td5\" (UniqueName: \"kubernetes.io/projected/79e6349a-afe4-412b-8d64-f0875a38ccf2-kube-api-access-w4td5\") pod \"apiserver-7bbb656c7d-cxnkh\" (UID: \"79e6349a-afe4-412b-8d64-f0875a38ccf2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.487388 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lxwz8\" (UID: \"43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwz8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.516891 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2l8nv\" (UniqueName: \"kubernetes.io/projected/8ecef833-b914-464d-a395-49bb7f66a180-kube-api-access-2l8nv\") pod \"openshift-config-operator-7777fb866f-td2hv\" (UID: \"8ecef833-b914-464d-a395-49bb7f66a180\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.518196 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.537636 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qsgc\" (UniqueName: \"kubernetes.io/projected/b6c9051a-b8c5-4127-9665-b07e92e60bab-kube-api-access-6qsgc\") pod \"machine-config-controller-84d6567774-7l2x6\" (UID: \"b6c9051a-b8c5-4127-9665-b07e92e60bab\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.549696 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.570784 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-ca-trust-extracted\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.570835 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/899a9cd1-026b-40e1-a698-9f9f4f7ad857-srv-cert\") pod \"olm-operator-6b444d44fb-qgvl4\" (UID: \"899a9cd1-026b-40e1-a698-9f9f4f7ad857\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.570865 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3c3589de-1b86-4b24-af28-d01a548fcd82-auth-proxy-config\") pod \"machine-approver-56656f9798-mdqnb\" (UID: \"3c3589de-1b86-4b24-af28-d01a548fcd82\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.570928 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-bound-sa-token\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571007 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-trusted-ca\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571031 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9b52d816-10ed-46d8-9421-678e446d568c-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-w4j7x\" (UID: \"9b52d816-10ed-46d8-9421-678e446d568c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4j7x" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571055 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/899a9cd1-026b-40e1-a698-9f9f4f7ad857-profile-collector-cert\") pod \"olm-operator-6b444d44fb-qgvl4\" (UID: \"899a9cd1-026b-40e1-a698-9f9f4f7ad857\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571076 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/35345934-239d-4ccb-b388-e861e51f49a6-etcd-ca\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571100 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvpp9\" (UniqueName: \"kubernetes.io/projected/3c3589de-1b86-4b24-af28-d01a548fcd82-kube-api-access-wvpp9\") pod \"machine-approver-56656f9798-mdqnb\" (UID: \"3c3589de-1b86-4b24-af28-d01a548fcd82\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571130 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czj7b\" (UniqueName: \"kubernetes.io/projected/27875db6-80f0-4378-ba4d-56df53952813-kube-api-access-czj7b\") pod \"console-operator-58897d9998-74tpl\" (UID: \"27875db6-80f0-4378-ba4d-56df53952813\") " pod="openshift-console-operator/console-operator-58897d9998-74tpl" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571160 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d118b564-5f65-4f23-aa56-42316ba80ef0-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-hnk5n\" (UID: \"d118b564-5f65-4f23-aa56-42316ba80ef0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hnk5n" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571220 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-registry-tls\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571247 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c3589de-1b86-4b24-af28-d01a548fcd82-config\") pod \"machine-approver-56656f9798-mdqnb\" (UID: \"3c3589de-1b86-4b24-af28-d01a548fcd82\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571267 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/35345934-239d-4ccb-b388-e861e51f49a6-etcd-service-ca\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571290 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/301dbf85-d5d3-48f8-8d66-5d05a2d2d22b-default-certificate\") pod \"router-default-5444994796-qbkd8\" (UID: \"301dbf85-d5d3-48f8-8d66-5d05a2d2d22b\") " pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571315 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/35345934-239d-4ccb-b388-e861e51f49a6-serving-cert\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571408 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-installation-pull-secrets\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571436 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/445199dc-20f2-4401-a687-b2a126ddcdd2-srv-cert\") pod \"catalog-operator-68c6474976-9srsc\" (UID: \"445199dc-20f2-4401-a687-b2a126ddcdd2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571482 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfsg7\" (UniqueName: \"kubernetes.io/projected/46332c07-10b7-4cd4-abcc-af9054cfb28d-kube-api-access-bfsg7\") pod \"cluster-samples-operator-665b6dd947-nhlpk\" (UID: \"46332c07-10b7-4cd4-abcc-af9054cfb28d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nhlpk" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571525 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27875db6-80f0-4378-ba4d-56df53952813-serving-cert\") pod \"console-operator-58897d9998-74tpl\" (UID: \"27875db6-80f0-4378-ba4d-56df53952813\") " pod="openshift-console-operator/console-operator-58897d9998-74tpl" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571556 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6j22\" (UniqueName: \"kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-kube-api-access-t6j22\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571597 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/3c3589de-1b86-4b24-af28-d01a548fcd82-machine-approver-tls\") pod \"machine-approver-56656f9798-mdqnb\" (UID: \"3c3589de-1b86-4b24-af28-d01a548fcd82\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571624 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c8180671-37d6-4315-a47f-bdaeef223448-metrics-tls\") pod \"dns-operator-744455d44c-5dbn8\" (UID: \"c8180671-37d6-4315-a47f-bdaeef223448\") " pod="openshift-dns-operator/dns-operator-744455d44c-5dbn8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571649 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdjv8\" (UniqueName: \"kubernetes.io/projected/35345934-239d-4ccb-b388-e861e51f49a6-kube-api-access-gdjv8\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571671 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d118b564-5f65-4f23-aa56-42316ba80ef0-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-hnk5n\" (UID: \"d118b564-5f65-4f23-aa56-42316ba80ef0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hnk5n" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571693 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35345934-239d-4ccb-b388-e861e51f49a6-config\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571745 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/301dbf85-d5d3-48f8-8d66-5d05a2d2d22b-stats-auth\") pod \"router-default-5444994796-qbkd8\" (UID: \"301dbf85-d5d3-48f8-8d66-5d05a2d2d22b\") " pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571769 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rrjf\" (UniqueName: \"kubernetes.io/projected/c8180671-37d6-4315-a47f-bdaeef223448-kube-api-access-2rrjf\") pod \"dns-operator-744455d44c-5dbn8\" (UID: \"c8180671-37d6-4315-a47f-bdaeef223448\") " pod="openshift-dns-operator/dns-operator-744455d44c-5dbn8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571796 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqczv\" (UniqueName: \"kubernetes.io/projected/d118b564-5f65-4f23-aa56-42316ba80ef0-kube-api-access-hqczv\") pod \"openshift-controller-manager-operator-756b6f6bc6-hnk5n\" (UID: \"d118b564-5f65-4f23-aa56-42316ba80ef0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hnk5n" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571836 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-registry-certificates\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571859 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nkpq\" (UniqueName: \"kubernetes.io/projected/445199dc-20f2-4401-a687-b2a126ddcdd2-kube-api-access-6nkpq\") pod \"catalog-operator-68c6474976-9srsc\" (UID: \"445199dc-20f2-4401-a687-b2a126ddcdd2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571884 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/301dbf85-d5d3-48f8-8d66-5d05a2d2d22b-metrics-certs\") pod \"router-default-5444994796-qbkd8\" (UID: \"301dbf85-d5d3-48f8-8d66-5d05a2d2d22b\") " 
pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571906 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fg6s\" (UniqueName: \"kubernetes.io/projected/899a9cd1-026b-40e1-a698-9f9f4f7ad857-kube-api-access-9fg6s\") pod \"olm-operator-6b444d44fb-qgvl4\" (UID: \"899a9cd1-026b-40e1-a698-9f9f4f7ad857\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571928 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/445199dc-20f2-4401-a687-b2a126ddcdd2-profile-collector-cert\") pod \"catalog-operator-68c6474976-9srsc\" (UID: \"445199dc-20f2-4401-a687-b2a126ddcdd2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571954 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rncxc\" (UniqueName: \"kubernetes.io/projected/301dbf85-d5d3-48f8-8d66-5d05a2d2d22b-kube-api-access-rncxc\") pod \"router-default-5444994796-qbkd8\" (UID: \"301dbf85-d5d3-48f8-8d66-5d05a2d2d22b\") " pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.571977 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b52d816-10ed-46d8-9421-678e446d568c-config\") pod \"openshift-apiserver-operator-796bbdcf4f-w4j7x\" (UID: \"9b52d816-10ed-46d8-9421-678e446d568c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4j7x" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.572024 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/27875db6-80f0-4378-ba4d-56df53952813-trusted-ca\") pod \"console-operator-58897d9998-74tpl\" (UID: \"27875db6-80f0-4378-ba4d-56df53952813\") " pod="openshift-console-operator/console-operator-58897d9998-74tpl" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.572056 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.572083 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmfxs\" (UniqueName: \"kubernetes.io/projected/9b52d816-10ed-46d8-9421-678e446d568c-kube-api-access-kmfxs\") pod \"openshift-apiserver-operator-796bbdcf4f-w4j7x\" (UID: \"9b52d816-10ed-46d8-9421-678e446d568c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4j7x" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.572107 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27875db6-80f0-4378-ba4d-56df53952813-config\") pod \"console-operator-58897d9998-74tpl\" (UID: \"27875db6-80f0-4378-ba4d-56df53952813\") " 
pod="openshift-console-operator/console-operator-58897d9998-74tpl" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.572228 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/35345934-239d-4ccb-b388-e861e51f49a6-etcd-client\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: E1211 08:18:29.573779 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:30.07376177 +0000 UTC m=+158.451130667 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.573723 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/46332c07-10b7-4cd4-abcc-af9054cfb28d-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-nhlpk\" (UID: \"46332c07-10b7-4cd4-abcc-af9054cfb28d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nhlpk" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.574399 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/301dbf85-d5d3-48f8-8d66-5d05a2d2d22b-service-ca-bundle\") pod \"router-default-5444994796-qbkd8\" (UID: \"301dbf85-d5d3-48f8-8d66-5d05a2d2d22b\") " pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.575381 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.581491 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/cb02b71d-7677-477a-8068-c687ebb146ee-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-r546r\" (UID: \"cb02b71d-7677-477a-8068-c687ebb146ee\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.595073 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.600489 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.603314 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwz8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.604728 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/317a7f01-2747-4994-8000-54613f522149-client-ca\") pod \"route-controller-manager-6576b87f9c-882gs\" (UID: \"317a7f01-2747-4994-8000-54613f522149\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.616643 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.619442 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.629959 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/317a7f01-2747-4994-8000-54613f522149-serving-cert\") pod \"route-controller-manager-6576b87f9c-882gs\" (UID: \"317a7f01-2747-4994-8000-54613f522149\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.639072 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.653281 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.668589 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.674950 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.675233 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2n9k5\" (UID: \"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae\") " pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.675263 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/ed625b9a-3c51-472f-9210-761f7a318acd-tmpfs\") pod \"packageserver-d55dfcdfc-459c4\" (UID: \"ed625b9a-3c51-472f-9210-761f7a318acd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.675283 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ed625b9a-3c51-472f-9210-761f7a318acd-apiservice-cert\") pod \"packageserver-d55dfcdfc-459c4\" (UID: \"ed625b9a-3c51-472f-9210-761f7a318acd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.675311 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d118b564-5f65-4f23-aa56-42316ba80ef0-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-hnk5n\" (UID: \"d118b564-5f65-4f23-aa56-42316ba80ef0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hnk5n" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676185 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/301dbf85-d5d3-48f8-8d66-5d05a2d2d22b-default-certificate\") pod \"router-default-5444994796-qbkd8\" (UID: \"301dbf85-d5d3-48f8-8d66-5d05a2d2d22b\") " pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676235 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-registry-tls\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676257 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/35345934-239d-4ccb-b388-e861e51f49a6-etcd-service-ca\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 
08:18:29.676279 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e5fd3de-8162-4bf5-bc4f-e704b605108c-config-volume\") pod \"dns-default-f5gdp\" (UID: \"4e5fd3de-8162-4bf5-bc4f-e704b605108c\") " pod="openshift-dns/dns-default-f5gdp" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676309 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/35345934-239d-4ccb-b388-e861e51f49a6-serving-cert\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676366 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-installation-pull-secrets\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676390 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8qwh\" (UniqueName: \"kubernetes.io/projected/77d994ef-53f1-4ef8-a668-38226c6c460b-kube-api-access-c8qwh\") pod \"collect-profiles-29424015-bl9jf\" (UID: \"77d994ef-53f1-4ef8-a668-38226c6c460b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676422 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/445199dc-20f2-4401-a687-b2a126ddcdd2-srv-cert\") pod \"catalog-operator-68c6474976-9srsc\" (UID: \"445199dc-20f2-4401-a687-b2a126ddcdd2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676445 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/9589dff7-1c8f-4e58-b31c-b70ec577353a-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-tgcxv\" (UID: \"9589dff7-1c8f-4e58-b31c-b70ec577353a\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tgcxv" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676469 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/09ace6bf-d3f8-407b-a17f-7163f94af7c7-registration-dir\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676491 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sx96d\" (UniqueName: \"kubernetes.io/projected/279350fe-c8bb-4389-9915-666eb8694c79-kube-api-access-sx96d\") pod \"service-ca-9c57cc56f-vbpk5\" (UID: \"279350fe-c8bb-4389-9915-666eb8694c79\") " pod="openshift-service-ca/service-ca-9c57cc56f-vbpk5" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676515 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" 
(UniqueName: \"kubernetes.io/secret/0ca4c6e6-5e38-4798-acaf-8b2574668772-metrics-tls\") pod \"ingress-operator-5b745b69d9-jzrlt\" (UID: \"0ca4c6e6-5e38-4798-acaf-8b2574668772\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676535 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/279350fe-c8bb-4389-9915-666eb8694c79-signing-cabundle\") pod \"service-ca-9c57cc56f-vbpk5\" (UID: \"279350fe-c8bb-4389-9915-666eb8694c79\") " pod="openshift-service-ca/service-ca-9c57cc56f-vbpk5" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676562 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27875db6-80f0-4378-ba4d-56df53952813-serving-cert\") pod \"console-operator-58897d9998-74tpl\" (UID: \"27875db6-80f0-4378-ba4d-56df53952813\") " pod="openshift-console-operator/console-operator-58897d9998-74tpl" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676585 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/09ace6bf-d3f8-407b-a17f-7163f94af7c7-csi-data-dir\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676636 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bh4l2\" (UniqueName: \"kubernetes.io/projected/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-kube-api-access-bh4l2\") pod \"marketplace-operator-79b997595-2n9k5\" (UID: \"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae\") " pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676661 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c8180671-37d6-4315-a47f-bdaeef223448-metrics-tls\") pod \"dns-operator-744455d44c-5dbn8\" (UID: \"c8180671-37d6-4315-a47f-bdaeef223448\") " pod="openshift-dns-operator/dns-operator-744455d44c-5dbn8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676688 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdjv8\" (UniqueName: \"kubernetes.io/projected/35345934-239d-4ccb-b388-e861e51f49a6-kube-api-access-gdjv8\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676711 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d118b564-5f65-4f23-aa56-42316ba80ef0-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-hnk5n\" (UID: \"d118b564-5f65-4f23-aa56-42316ba80ef0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hnk5n" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676736 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7vb8\" (UniqueName: \"kubernetes.io/projected/0ca4c6e6-5e38-4798-acaf-8b2574668772-kube-api-access-p7vb8\") pod \"ingress-operator-5b745b69d9-jzrlt\" (UID: 
\"0ca4c6e6-5e38-4798-acaf-8b2574668772\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676759 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ed625b9a-3c51-472f-9210-761f7a318acd-webhook-cert\") pod \"packageserver-d55dfcdfc-459c4\" (UID: \"ed625b9a-3c51-472f-9210-761f7a318acd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676811 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/301dbf85-d5d3-48f8-8d66-5d05a2d2d22b-stats-auth\") pod \"router-default-5444994796-qbkd8\" (UID: \"301dbf85-d5d3-48f8-8d66-5d05a2d2d22b\") " pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676839 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqczv\" (UniqueName: \"kubernetes.io/projected/d118b564-5f65-4f23-aa56-42316ba80ef0-kube-api-access-hqczv\") pod \"openshift-controller-manager-operator-756b6f6bc6-hnk5n\" (UID: \"d118b564-5f65-4f23-aa56-42316ba80ef0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hnk5n" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676862 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhqgd\" (UniqueName: \"kubernetes.io/projected/a658d244-5927-4518-b8bb-0685d0e40a07-kube-api-access-vhqgd\") pod \"control-plane-machine-set-operator-78cbb6b69f-dw769\" (UID: \"a658d244-5927-4518-b8bb-0685d0e40a07\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-dw769" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676918 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-registry-certificates\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676943 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/301dbf85-d5d3-48f8-8d66-5d05a2d2d22b-metrics-certs\") pod \"router-default-5444994796-qbkd8\" (UID: \"301dbf85-d5d3-48f8-8d66-5d05a2d2d22b\") " pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:29 crc kubenswrapper[4881]: E1211 08:18:29.676967 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:30.176943776 +0000 UTC m=+158.554312473 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.676992 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fg6s\" (UniqueName: \"kubernetes.io/projected/899a9cd1-026b-40e1-a698-9f9f4f7ad857-kube-api-access-9fg6s\") pod \"olm-operator-6b444d44fb-qgvl4\" (UID: \"899a9cd1-026b-40e1-a698-9f9f4f7ad857\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677024 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvnrx\" (UniqueName: \"kubernetes.io/projected/1611d2d4-1d07-4ccb-aba0-2885b075dd9c-kube-api-access-lvnrx\") pod \"machine-config-operator-74547568cd-d8zgs\" (UID: \"1611d2d4-1d07-4ccb-aba0-2885b075dd9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677049 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xlqz\" (UniqueName: \"kubernetes.io/projected/0229d25c-9b61-4e79-b8a6-47188bd5de7f-kube-api-access-4xlqz\") pod \"kube-storage-version-migrator-operator-b67b599dd-8p5cn\" (UID: \"0229d25c-9b61-4e79-b8a6-47188bd5de7f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8p5cn" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677089 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b52d816-10ed-46d8-9421-678e446d568c-config\") pod \"openshift-apiserver-operator-796bbdcf4f-w4j7x\" (UID: \"9b52d816-10ed-46d8-9421-678e446d568c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4j7x" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677111 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/27875db6-80f0-4378-ba4d-56df53952813-trusted-ca\") pod \"console-operator-58897d9998-74tpl\" (UID: \"27875db6-80f0-4378-ba4d-56df53952813\") " pod="openshift-console-operator/console-operator-58897d9998-74tpl" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677134 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f-config\") pod \"kube-apiserver-operator-766d6c64bb-9stj8\" (UID: \"ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677165 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677189 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/09ace6bf-d3f8-407b-a17f-7163f94af7c7-plugins-dir\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677211 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/77d994ef-53f1-4ef8-a668-38226c6c460b-config-volume\") pod \"collect-profiles-29424015-bl9jf\" (UID: \"77d994ef-53f1-4ef8-a668-38226c6c460b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677248 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/35345934-239d-4ccb-b388-e861e51f49a6-etcd-client\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677291 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/46332c07-10b7-4cd4-abcc-af9054cfb28d-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-nhlpk\" (UID: \"46332c07-10b7-4cd4-abcc-af9054cfb28d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nhlpk" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677318 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/301dbf85-d5d3-48f8-8d66-5d05a2d2d22b-service-ca-bundle\") pod \"router-default-5444994796-qbkd8\" (UID: \"301dbf85-d5d3-48f8-8d66-5d05a2d2d22b\") " pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677362 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0229d25c-9b61-4e79-b8a6-47188bd5de7f-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-8p5cn\" (UID: \"0229d25c-9b61-4e79-b8a6-47188bd5de7f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8p5cn" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677388 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84272\" (UniqueName: \"kubernetes.io/projected/ed625b9a-3c51-472f-9210-761f7a318acd-kube-api-access-84272\") pod \"packageserver-d55dfcdfc-459c4\" (UID: \"ed625b9a-3c51-472f-9210-761f7a318acd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677475 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/899a9cd1-026b-40e1-a698-9f9f4f7ad857-srv-cert\") pod \"olm-operator-6b444d44fb-qgvl4\" (UID: \"899a9cd1-026b-40e1-a698-9f9f4f7ad857\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 
08:18:29.677640 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3d0f98f5-f496-43cd-8b37-6f969af809d4-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-kjhrd\" (UID: \"3d0f98f5-f496-43cd-8b37-6f969af809d4\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kjhrd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677706 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3c3589de-1b86-4b24-af28-d01a548fcd82-auth-proxy-config\") pod \"machine-approver-56656f9798-mdqnb\" (UID: \"3c3589de-1b86-4b24-af28-d01a548fcd82\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677732 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-bound-sa-token\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677757 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5gm2\" (UniqueName: \"kubernetes.io/projected/390d1287-ab65-447e-93a0-44beb7ec9a84-kube-api-access-d5gm2\") pod \"migrator-59844c95c7-9jj8n\" (UID: \"390d1287-ab65-447e-93a0-44beb7ec9a84\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-9jj8n" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.677818 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-trusted-ca\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.679735 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.679789 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3c3589de-1b86-4b24-af28-d01a548fcd82-auth-proxy-config\") pod \"machine-approver-56656f9798-mdqnb\" (UID: \"3c3589de-1b86-4b24-af28-d01a548fcd82\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.679812 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/301dbf85-d5d3-48f8-8d66-5d05a2d2d22b-service-ca-bundle\") pod \"router-default-5444994796-qbkd8\" (UID: \"301dbf85-d5d3-48f8-8d66-5d05a2d2d22b\") " pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.681357 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbxdj\" (UniqueName: \"kubernetes.io/projected/9589dff7-1c8f-4e58-b31c-b70ec577353a-kube-api-access-gbxdj\") pod \"package-server-manager-789f6589d5-tgcxv\" (UID: \"9589dff7-1c8f-4e58-b31c-b70ec577353a\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tgcxv" Dec 
11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.681487 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvpp9\" (UniqueName: \"kubernetes.io/projected/3c3589de-1b86-4b24-af28-d01a548fcd82-kube-api-access-wvpp9\") pod \"machine-approver-56656f9798-mdqnb\" (UID: \"3c3589de-1b86-4b24-af28-d01a548fcd82\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.681529 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/899a9cd1-026b-40e1-a698-9f9f4f7ad857-profile-collector-cert\") pod \"olm-operator-6b444d44fb-qgvl4\" (UID: \"899a9cd1-026b-40e1-a698-9f9f4f7ad857\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.681555 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/35345934-239d-4ccb-b388-e861e51f49a6-etcd-ca\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.681590 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czj7b\" (UniqueName: \"kubernetes.io/projected/27875db6-80f0-4378-ba4d-56df53952813-kube-api-access-czj7b\") pod \"console-operator-58897d9998-74tpl\" (UID: \"27875db6-80f0-4378-ba4d-56df53952813\") " pod="openshift-console-operator/console-operator-58897d9998-74tpl" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.681620 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/279350fe-c8bb-4389-9915-666eb8694c79-signing-key\") pod \"service-ca-9c57cc56f-vbpk5\" (UID: \"279350fe-c8bb-4389-9915-666eb8694c79\") " pod="openshift-service-ca/service-ca-9c57cc56f-vbpk5" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.681647 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2b9p9\" (UniqueName: \"kubernetes.io/projected/ed97acca-9dbf-4791-a11b-164ca4d74f55-kube-api-access-2b9p9\") pod \"service-ca-operator-777779d784-w78d4\" (UID: \"ed97acca-9dbf-4791-a11b-164ca4d74f55\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-w78d4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.681684 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c3589de-1b86-4b24-af28-d01a548fcd82-config\") pod \"machine-approver-56656f9798-mdqnb\" (UID: \"3c3589de-1b86-4b24-af28-d01a548fcd82\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.681726 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/d658247d-c8dd-49dd-9372-618a42ea566d-certs\") pod \"machine-config-server-dcbjb\" (UID: \"d658247d-c8dd-49dd-9372-618a42ea566d\") " pod="openshift-machine-config-operator/machine-config-server-dcbjb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.681992 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" 
(UniqueName: \"kubernetes.io/secret/77d994ef-53f1-4ef8-a668-38226c6c460b-secret-volume\") pod \"collect-profiles-29424015-bl9jf\" (UID: \"77d994ef-53f1-4ef8-a668-38226c6c460b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682030 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/09ace6bf-d3f8-407b-a17f-7163f94af7c7-mountpoint-dir\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682061 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfsg7\" (UniqueName: \"kubernetes.io/projected/46332c07-10b7-4cd4-abcc-af9054cfb28d-kube-api-access-bfsg7\") pod \"cluster-samples-operator-665b6dd947-nhlpk\" (UID: \"46332c07-10b7-4cd4-abcc-af9054cfb28d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nhlpk" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682116 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/d658247d-c8dd-49dd-9372-618a42ea566d-node-bootstrap-token\") pod \"machine-config-server-dcbjb\" (UID: \"d658247d-c8dd-49dd-9372-618a42ea566d\") " pod="openshift-machine-config-operator/machine-config-server-dcbjb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682143 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6j22\" (UniqueName: \"kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-kube-api-access-t6j22\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682191 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1611d2d4-1d07-4ccb-aba0-2885b075dd9c-images\") pod \"machine-config-operator-74547568cd-d8zgs\" (UID: \"1611d2d4-1d07-4ccb-aba0-2885b075dd9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682230 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/3c3589de-1b86-4b24-af28-d01a548fcd82-machine-approver-tls\") pod \"machine-approver-56656f9798-mdqnb\" (UID: \"3c3589de-1b86-4b24-af28-d01a548fcd82\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682257 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4e5fd3de-8162-4bf5-bc4f-e704b605108c-metrics-tls\") pod \"dns-default-f5gdp\" (UID: \"4e5fd3de-8162-4bf5-bc4f-e704b605108c\") " pod="openshift-dns/dns-default-f5gdp" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682456 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0229d25c-9b61-4e79-b8a6-47188bd5de7f-serving-cert\") pod 
\"kube-storage-version-migrator-operator-b67b599dd-8p5cn\" (UID: \"0229d25c-9b61-4e79-b8a6-47188bd5de7f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8p5cn" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682483 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed97acca-9dbf-4791-a11b-164ca4d74f55-serving-cert\") pod \"service-ca-operator-777779d784-w78d4\" (UID: \"ed97acca-9dbf-4791-a11b-164ca4d74f55\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-w78d4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682531 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1611d2d4-1d07-4ccb-aba0-2885b075dd9c-proxy-tls\") pod \"machine-config-operator-74547568cd-d8zgs\" (UID: \"1611d2d4-1d07-4ccb-aba0-2885b075dd9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682534 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-trusted-ca\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682576 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35345934-239d-4ccb-b388-e861e51f49a6-config\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682614 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcl8k\" (UniqueName: \"kubernetes.io/projected/09ace6bf-d3f8-407b-a17f-7163f94af7c7-kube-api-access-bcl8k\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682633 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed97acca-9dbf-4791-a11b-164ca4d74f55-config\") pod \"service-ca-operator-777779d784-w78d4\" (UID: \"ed97acca-9dbf-4791-a11b-164ca4d74f55\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-w78d4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682657 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rrjf\" (UniqueName: \"kubernetes.io/projected/c8180671-37d6-4315-a47f-bdaeef223448-kube-api-access-2rrjf\") pod \"dns-operator-744455d44c-5dbn8\" (UID: \"c8180671-37d6-4315-a47f-bdaeef223448\") " pod="openshift-dns-operator/dns-operator-744455d44c-5dbn8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682679 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phj6p\" (UniqueName: \"kubernetes.io/projected/a648f1f8-f077-421b-9599-95e3ef459adf-kube-api-access-phj6p\") pod \"ingress-canary-kjptn\" (UID: \"a648f1f8-f077-421b-9599-95e3ef459adf\") " 
pod="openshift-ingress-canary/ingress-canary-kjptn" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682714 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5c4fd066-f527-4568-91f8-71b92b5db286-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-rsvfs\" (UID: \"5c4fd066-f527-4568-91f8-71b92b5db286\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rsvfs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682748 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nkpq\" (UniqueName: \"kubernetes.io/projected/445199dc-20f2-4401-a687-b2a126ddcdd2-kube-api-access-6nkpq\") pod \"catalog-operator-68c6474976-9srsc\" (UID: \"445199dc-20f2-4401-a687-b2a126ddcdd2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682771 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2n9k5\" (UID: \"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae\") " pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682790 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c4fd066-f527-4568-91f8-71b92b5db286-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-rsvfs\" (UID: \"5c4fd066-f527-4568-91f8-71b92b5db286\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rsvfs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682808 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccndv\" (UniqueName: \"kubernetes.io/projected/3d0f98f5-f496-43cd-8b37-6f969af809d4-kube-api-access-ccndv\") pod \"multus-admission-controller-857f4d67dd-kjhrd\" (UID: \"3d0f98f5-f496-43cd-8b37-6f969af809d4\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kjhrd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682811 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/35345934-239d-4ccb-b388-e861e51f49a6-etcd-service-ca\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682858 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/445199dc-20f2-4401-a687-b2a126ddcdd2-profile-collector-cert\") pod \"catalog-operator-68c6474976-9srsc\" (UID: \"445199dc-20f2-4401-a687-b2a126ddcdd2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682881 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rncxc\" (UniqueName: \"kubernetes.io/projected/301dbf85-d5d3-48f8-8d66-5d05a2d2d22b-kube-api-access-rncxc\") pod \"router-default-5444994796-qbkd8\" (UID: 
\"301dbf85-d5d3-48f8-8d66-5d05a2d2d22b\") " pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682901 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-9stj8\" (UID: \"ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682956 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0ca4c6e6-5e38-4798-acaf-8b2574668772-trusted-ca\") pod \"ingress-operator-5b745b69d9-jzrlt\" (UID: \"0ca4c6e6-5e38-4798-acaf-8b2574668772\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682975 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cb02b71d-7677-477a-8068-c687ebb146ee-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-r546r\" (UID: \"cb02b71d-7677-477a-8068-c687ebb146ee\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682992 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c3589de-1b86-4b24-af28-d01a548fcd82-config\") pod \"machine-approver-56656f9798-mdqnb\" (UID: \"3c3589de-1b86-4b24-af28-d01a548fcd82\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.682985 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a658d244-5927-4518-b8bb-0685d0e40a07-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-dw769\" (UID: \"a658d244-5927-4518-b8bb-0685d0e40a07\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-dw769" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.683078 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmfxs\" (UniqueName: \"kubernetes.io/projected/9b52d816-10ed-46d8-9421-678e446d568c-kube-api-access-kmfxs\") pod \"openshift-apiserver-operator-796bbdcf4f-w4j7x\" (UID: \"9b52d816-10ed-46d8-9421-678e446d568c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4j7x" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.683105 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27875db6-80f0-4378-ba4d-56df53952813-config\") pod \"console-operator-58897d9998-74tpl\" (UID: \"27875db6-80f0-4378-ba4d-56df53952813\") " pod="openshift-console-operator/console-operator-58897d9998-74tpl" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.683129 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/09ace6bf-d3f8-407b-a17f-7163f94af7c7-socket-dir\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " 
pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.683146 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a648f1f8-f077-421b-9599-95e3ef459adf-cert\") pod \"ingress-canary-kjptn\" (UID: \"a648f1f8-f077-421b-9599-95e3ef459adf\") " pod="openshift-ingress-canary/ingress-canary-kjptn" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.683194 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0ca4c6e6-5e38-4798-acaf-8b2574668772-bound-sa-token\") pod \"ingress-operator-5b745b69d9-jzrlt\" (UID: \"0ca4c6e6-5e38-4798-acaf-8b2574668772\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.683222 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mz66x\" (UniqueName: \"kubernetes.io/projected/d658247d-c8dd-49dd-9372-618a42ea566d-kube-api-access-mz66x\") pod \"machine-config-server-dcbjb\" (UID: \"d658247d-c8dd-49dd-9372-618a42ea566d\") " pod="openshift-machine-config-operator/machine-config-server-dcbjb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.683252 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-ca-trust-extracted\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.683275 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rs7vt\" (UniqueName: \"kubernetes.io/projected/4e5fd3de-8162-4bf5-bc4f-e704b605108c-kube-api-access-rs7vt\") pod \"dns-default-f5gdp\" (UID: \"4e5fd3de-8162-4bf5-bc4f-e704b605108c\") " pod="openshift-dns/dns-default-f5gdp" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.683319 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1611d2d4-1d07-4ccb-aba0-2885b075dd9c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-d8zgs\" (UID: \"1611d2d4-1d07-4ccb-aba0-2885b075dd9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.683411 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c4fd066-f527-4568-91f8-71b92b5db286-config\") pod \"kube-controller-manager-operator-78b949d7b-rsvfs\" (UID: \"5c4fd066-f527-4568-91f8-71b92b5db286\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rsvfs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.683433 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-9stj8\" (UID: \"ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8" Dec 11 08:18:29 crc 
kubenswrapper[4881]: I1211 08:18:29.683452 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9b52d816-10ed-46d8-9421-678e446d568c-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-w4j7x\" (UID: \"9b52d816-10ed-46d8-9421-678e446d568c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4j7x" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.683643 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/35345934-239d-4ccb-b388-e861e51f49a6-etcd-client\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.683786 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35345934-239d-4ccb-b388-e861e51f49a6-config\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.684386 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27875db6-80f0-4378-ba4d-56df53952813-config\") pod \"console-operator-58897d9998-74tpl\" (UID: \"27875db6-80f0-4378-ba4d-56df53952813\") " pod="openshift-console-operator/console-operator-58897d9998-74tpl" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.684622 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-ca-trust-extracted\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.685199 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/301dbf85-d5d3-48f8-8d66-5d05a2d2d22b-metrics-certs\") pod \"router-default-5444994796-qbkd8\" (UID: \"301dbf85-d5d3-48f8-8d66-5d05a2d2d22b\") " pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.685232 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/46332c07-10b7-4cd4-abcc-af9054cfb28d-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-nhlpk\" (UID: \"46332c07-10b7-4cd4-abcc-af9054cfb28d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nhlpk" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.685753 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/35345934-239d-4ccb-b388-e861e51f49a6-etcd-ca\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.685913 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-registry-certificates\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.689673 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-installation-pull-secrets\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.690360 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/35345934-239d-4ccb-b388-e861e51f49a6-serving-cert\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.690403 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/3c3589de-1b86-4b24-af28-d01a548fcd82-machine-approver-tls\") pod \"machine-approver-56656f9798-mdqnb\" (UID: \"3c3589de-1b86-4b24-af28-d01a548fcd82\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.690901 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27875db6-80f0-4378-ba4d-56df53952813-serving-cert\") pod \"console-operator-58897d9998-74tpl\" (UID: \"27875db6-80f0-4378-ba4d-56df53952813\") " pod="openshift-console-operator/console-operator-58897d9998-74tpl" Dec 11 08:18:29 crc kubenswrapper[4881]: E1211 08:18:29.691668 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:30.191645599 +0000 UTC m=+158.569014486 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.692245 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b52d816-10ed-46d8-9421-678e446d568c-config\") pod \"openshift-apiserver-operator-796bbdcf4f-w4j7x\" (UID: \"9b52d816-10ed-46d8-9421-678e446d568c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4j7x" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.692668 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c8180671-37d6-4315-a47f-bdaeef223448-metrics-tls\") pod \"dns-operator-744455d44c-5dbn8\" (UID: \"c8180671-37d6-4315-a47f-bdaeef223448\") " pod="openshift-dns-operator/dns-operator-744455d44c-5dbn8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.693412 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d118b564-5f65-4f23-aa56-42316ba80ef0-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-hnk5n\" (UID: \"d118b564-5f65-4f23-aa56-42316ba80ef0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hnk5n" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.694947 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/301dbf85-d5d3-48f8-8d66-5d05a2d2d22b-default-certificate\") pod \"router-default-5444994796-qbkd8\" (UID: \"301dbf85-d5d3-48f8-8d66-5d05a2d2d22b\") " pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.695582 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-registry-tls\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.696664 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/445199dc-20f2-4401-a687-b2a126ddcdd2-profile-collector-cert\") pod \"catalog-operator-68c6474976-9srsc\" (UID: \"445199dc-20f2-4401-a687-b2a126ddcdd2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.696833 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/899a9cd1-026b-40e1-a698-9f9f4f7ad857-srv-cert\") pod \"olm-operator-6b444d44fb-qgvl4\" (UID: \"899a9cd1-026b-40e1-a698-9f9f4f7ad857\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.696926 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: 
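The E1211 MountVolume.MountDevice failure above is the kubelet reporting that the hostpath CSI plugin had not yet announced itself over the plugin-registration socket when the image-registry pod's PVC came up for mounting; once csi-hostpathplugin-nh6jb registers (via the registration-dir host-path volume being mounted in these same entries), the parked operation is retried and succeeds. A minimal Go sketch of watching for that registration from outside the kubelet, assuming a node named "crc" and the default kubeconfig location (both are illustrative assumptions, not values confirmed by this log):

// csinode-check.go: poll the node's CSINode object until the hostpath
// driver appears in spec.drivers, which is the "list of registered CSI
// drivers" the kubelet error message refers to.
package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile) // assumed kubeconfig path
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	for {
		n, err := cs.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{}) // assumed node name
		if err == nil {
			for _, d := range n.Spec.Drivers {
				if d.Name == "kubevirt.io.hostpath-provisioner" {
					fmt.Println("driver registered; pending mounts can proceed")
					return
				}
			}
		}
		fmt.Println("driver not yet registered; retrying")
		time.Sleep(2 * time.Second)
	}
}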
\"kubernetes.io/secret/899a9cd1-026b-40e1-a698-9f9f4f7ad857-profile-collector-cert\") pod \"olm-operator-6b444d44fb-qgvl4\" (UID: \"899a9cd1-026b-40e1-a698-9f9f4f7ad857\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.697454 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9b52d816-10ed-46d8-9421-678e446d568c-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-w4j7x\" (UID: \"9b52d816-10ed-46d8-9421-678e446d568c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4j7x" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.698764 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d118b564-5f65-4f23-aa56-42316ba80ef0-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-hnk5n\" (UID: \"d118b564-5f65-4f23-aa56-42316ba80ef0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hnk5n" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.700144 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/301dbf85-d5d3-48f8-8d66-5d05a2d2d22b-stats-auth\") pod \"router-default-5444994796-qbkd8\" (UID: \"301dbf85-d5d3-48f8-8d66-5d05a2d2d22b\") " pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.701462 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/445199dc-20f2-4401-a687-b2a126ddcdd2-srv-cert\") pod \"catalog-operator-68c6474976-9srsc\" (UID: \"445199dc-20f2-4401-a687-b2a126ddcdd2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.708693 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/27875db6-80f0-4378-ba4d-56df53952813-trusted-ca\") pod \"console-operator-58897d9998-74tpl\" (UID: \"27875db6-80f0-4378-ba4d-56df53952813\") " pod="openshift-console-operator/console-operator-58897d9998-74tpl" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.738426 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-bound-sa-token\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.782726 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czj7b\" (UniqueName: \"kubernetes.io/projected/27875db6-80f0-4378-ba4d-56df53952813-kube-api-access-czj7b\") pod \"console-operator-58897d9998-74tpl\" (UID: \"27875db6-80f0-4378-ba4d-56df53952813\") " pod="openshift-console-operator/console-operator-58897d9998-74tpl" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786236 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:29 crc 
kubenswrapper[4881]: I1211 08:18:29.786461 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7vb8\" (UniqueName: \"kubernetes.io/projected/0ca4c6e6-5e38-4798-acaf-8b2574668772-kube-api-access-p7vb8\") pod \"ingress-operator-5b745b69d9-jzrlt\" (UID: \"0ca4c6e6-5e38-4798-acaf-8b2574668772\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786498 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ed625b9a-3c51-472f-9210-761f7a318acd-webhook-cert\") pod \"packageserver-d55dfcdfc-459c4\" (UID: \"ed625b9a-3c51-472f-9210-761f7a318acd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786520 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhqgd\" (UniqueName: \"kubernetes.io/projected/a658d244-5927-4518-b8bb-0685d0e40a07-kube-api-access-vhqgd\") pod \"control-plane-machine-set-operator-78cbb6b69f-dw769\" (UID: \"a658d244-5927-4518-b8bb-0685d0e40a07\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-dw769" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786550 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvnrx\" (UniqueName: \"kubernetes.io/projected/1611d2d4-1d07-4ccb-aba0-2885b075dd9c-kube-api-access-lvnrx\") pod \"machine-config-operator-74547568cd-d8zgs\" (UID: \"1611d2d4-1d07-4ccb-aba0-2885b075dd9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786568 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xlqz\" (UniqueName: \"kubernetes.io/projected/0229d25c-9b61-4e79-b8a6-47188bd5de7f-kube-api-access-4xlqz\") pod \"kube-storage-version-migrator-operator-b67b599dd-8p5cn\" (UID: \"0229d25c-9b61-4e79-b8a6-47188bd5de7f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8p5cn" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786605 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f-config\") pod \"kube-apiserver-operator-766d6c64bb-9stj8\" (UID: \"ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786641 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/09ace6bf-d3f8-407b-a17f-7163f94af7c7-plugins-dir\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786667 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/77d994ef-53f1-4ef8-a668-38226c6c460b-config-volume\") pod \"collect-profiles-29424015-bl9jf\" (UID: \"77d994ef-53f1-4ef8-a668-38226c6c460b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786703 4881 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0229d25c-9b61-4e79-b8a6-47188bd5de7f-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-8p5cn\" (UID: \"0229d25c-9b61-4e79-b8a6-47188bd5de7f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8p5cn" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786727 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84272\" (UniqueName: \"kubernetes.io/projected/ed625b9a-3c51-472f-9210-761f7a318acd-kube-api-access-84272\") pod \"packageserver-d55dfcdfc-459c4\" (UID: \"ed625b9a-3c51-472f-9210-761f7a318acd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786750 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3d0f98f5-f496-43cd-8b37-6f969af809d4-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-kjhrd\" (UID: \"3d0f98f5-f496-43cd-8b37-6f969af809d4\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kjhrd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786773 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5gm2\" (UniqueName: \"kubernetes.io/projected/390d1287-ab65-447e-93a0-44beb7ec9a84-kube-api-access-d5gm2\") pod \"migrator-59844c95c7-9jj8n\" (UID: \"390d1287-ab65-447e-93a0-44beb7ec9a84\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-9jj8n" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786797 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbxdj\" (UniqueName: \"kubernetes.io/projected/9589dff7-1c8f-4e58-b31c-b70ec577353a-kube-api-access-gbxdj\") pod \"package-server-manager-789f6589d5-tgcxv\" (UID: \"9589dff7-1c8f-4e58-b31c-b70ec577353a\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tgcxv" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786820 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/279350fe-c8bb-4389-9915-666eb8694c79-signing-key\") pod \"service-ca-9c57cc56f-vbpk5\" (UID: \"279350fe-c8bb-4389-9915-666eb8694c79\") " pod="openshift-service-ca/service-ca-9c57cc56f-vbpk5" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786840 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2b9p9\" (UniqueName: \"kubernetes.io/projected/ed97acca-9dbf-4791-a11b-164ca4d74f55-kube-api-access-2b9p9\") pod \"service-ca-operator-777779d784-w78d4\" (UID: \"ed97acca-9dbf-4791-a11b-164ca4d74f55\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-w78d4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786855 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/d658247d-c8dd-49dd-9372-618a42ea566d-certs\") pod \"machine-config-server-dcbjb\" (UID: \"d658247d-c8dd-49dd-9372-618a42ea566d\") " pod="openshift-machine-config-operator/machine-config-server-dcbjb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786875 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/77d994ef-53f1-4ef8-a668-38226c6c460b-secret-volume\") pod \"collect-profiles-29424015-bl9jf\" (UID: \"77d994ef-53f1-4ef8-a668-38226c6c460b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786889 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/09ace6bf-d3f8-407b-a17f-7163f94af7c7-mountpoint-dir\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786911 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/d658247d-c8dd-49dd-9372-618a42ea566d-node-bootstrap-token\") pod \"machine-config-server-dcbjb\" (UID: \"d658247d-c8dd-49dd-9372-618a42ea566d\") " pod="openshift-machine-config-operator/machine-config-server-dcbjb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786930 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1611d2d4-1d07-4ccb-aba0-2885b075dd9c-images\") pod \"machine-config-operator-74547568cd-d8zgs\" (UID: \"1611d2d4-1d07-4ccb-aba0-2885b075dd9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.786979 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4e5fd3de-8162-4bf5-bc4f-e704b605108c-metrics-tls\") pod \"dns-default-f5gdp\" (UID: \"4e5fd3de-8162-4bf5-bc4f-e704b605108c\") " pod="openshift-dns/dns-default-f5gdp" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787004 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0229d25c-9b61-4e79-b8a6-47188bd5de7f-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-8p5cn\" (UID: \"0229d25c-9b61-4e79-b8a6-47188bd5de7f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8p5cn" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787025 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed97acca-9dbf-4791-a11b-164ca4d74f55-serving-cert\") pod \"service-ca-operator-777779d784-w78d4\" (UID: \"ed97acca-9dbf-4791-a11b-164ca4d74f55\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-w78d4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787046 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1611d2d4-1d07-4ccb-aba0-2885b075dd9c-proxy-tls\") pod \"machine-config-operator-74547568cd-d8zgs\" (UID: \"1611d2d4-1d07-4ccb-aba0-2885b075dd9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787083 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcl8k\" (UniqueName: \"kubernetes.io/projected/09ace6bf-d3f8-407b-a17f-7163f94af7c7-kube-api-access-bcl8k\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " 
pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787104 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed97acca-9dbf-4791-a11b-164ca4d74f55-config\") pod \"service-ca-operator-777779d784-w78d4\" (UID: \"ed97acca-9dbf-4791-a11b-164ca4d74f55\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-w78d4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787131 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phj6p\" (UniqueName: \"kubernetes.io/projected/a648f1f8-f077-421b-9599-95e3ef459adf-kube-api-access-phj6p\") pod \"ingress-canary-kjptn\" (UID: \"a648f1f8-f077-421b-9599-95e3ef459adf\") " pod="openshift-ingress-canary/ingress-canary-kjptn" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787160 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2n9k5\" (UID: \"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae\") " pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787203 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c4fd066-f527-4568-91f8-71b92b5db286-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-rsvfs\" (UID: \"5c4fd066-f527-4568-91f8-71b92b5db286\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rsvfs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787225 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5c4fd066-f527-4568-91f8-71b92b5db286-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-rsvfs\" (UID: \"5c4fd066-f527-4568-91f8-71b92b5db286\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rsvfs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787243 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccndv\" (UniqueName: \"kubernetes.io/projected/3d0f98f5-f496-43cd-8b37-6f969af809d4-kube-api-access-ccndv\") pod \"multus-admission-controller-857f4d67dd-kjhrd\" (UID: \"3d0f98f5-f496-43cd-8b37-6f969af809d4\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kjhrd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787265 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-9stj8\" (UID: \"ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787280 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0ca4c6e6-5e38-4798-acaf-8b2574668772-trusted-ca\") pod \"ingress-operator-5b745b69d9-jzrlt\" (UID: \"0ca4c6e6-5e38-4798-acaf-8b2574668772\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 
08:18:29.787297 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a658d244-5927-4518-b8bb-0685d0e40a07-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-dw769\" (UID: \"a658d244-5927-4518-b8bb-0685d0e40a07\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-dw769" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787345 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/09ace6bf-d3f8-407b-a17f-7163f94af7c7-socket-dir\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787364 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a648f1f8-f077-421b-9599-95e3ef459adf-cert\") pod \"ingress-canary-kjptn\" (UID: \"a648f1f8-f077-421b-9599-95e3ef459adf\") " pod="openshift-ingress-canary/ingress-canary-kjptn" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787382 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0ca4c6e6-5e38-4798-acaf-8b2574668772-bound-sa-token\") pod \"ingress-operator-5b745b69d9-jzrlt\" (UID: \"0ca4c6e6-5e38-4798-acaf-8b2574668772\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787404 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mz66x\" (UniqueName: \"kubernetes.io/projected/d658247d-c8dd-49dd-9372-618a42ea566d-kube-api-access-mz66x\") pod \"machine-config-server-dcbjb\" (UID: \"d658247d-c8dd-49dd-9372-618a42ea566d\") " pod="openshift-machine-config-operator/machine-config-server-dcbjb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787427 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rs7vt\" (UniqueName: \"kubernetes.io/projected/4e5fd3de-8162-4bf5-bc4f-e704b605108c-kube-api-access-rs7vt\") pod \"dns-default-f5gdp\" (UID: \"4e5fd3de-8162-4bf5-bc4f-e704b605108c\") " pod="openshift-dns/dns-default-f5gdp" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787444 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1611d2d4-1d07-4ccb-aba0-2885b075dd9c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-d8zgs\" (UID: \"1611d2d4-1d07-4ccb-aba0-2885b075dd9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787470 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c4fd066-f527-4568-91f8-71b92b5db286-config\") pod \"kube-controller-manager-operator-78b949d7b-rsvfs\" (UID: \"5c4fd066-f527-4568-91f8-71b92b5db286\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rsvfs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787485 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-9stj8\" (UID: \"ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787506 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2n9k5\" (UID: \"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae\") " pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787522 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ed625b9a-3c51-472f-9210-761f7a318acd-apiservice-cert\") pod \"packageserver-d55dfcdfc-459c4\" (UID: \"ed625b9a-3c51-472f-9210-761f7a318acd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787537 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/ed625b9a-3c51-472f-9210-761f7a318acd-tmpfs\") pod \"packageserver-d55dfcdfc-459c4\" (UID: \"ed625b9a-3c51-472f-9210-761f7a318acd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787554 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e5fd3de-8162-4bf5-bc4f-e704b605108c-config-volume\") pod \"dns-default-f5gdp\" (UID: \"4e5fd3de-8162-4bf5-bc4f-e704b605108c\") " pod="openshift-dns/dns-default-f5gdp" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787576 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8qwh\" (UniqueName: \"kubernetes.io/projected/77d994ef-53f1-4ef8-a668-38226c6c460b-kube-api-access-c8qwh\") pod \"collect-profiles-29424015-bl9jf\" (UID: \"77d994ef-53f1-4ef8-a668-38226c6c460b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787593 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/9589dff7-1c8f-4e58-b31c-b70ec577353a-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-tgcxv\" (UID: \"9589dff7-1c8f-4e58-b31c-b70ec577353a\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tgcxv" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787609 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/09ace6bf-d3f8-407b-a17f-7163f94af7c7-registration-dir\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787625 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sx96d\" (UniqueName: \"kubernetes.io/projected/279350fe-c8bb-4389-9915-666eb8694c79-kube-api-access-sx96d\") pod \"service-ca-9c57cc56f-vbpk5\" (UID: \"279350fe-c8bb-4389-9915-666eb8694c79\") " 
pod="openshift-service-ca/service-ca-9c57cc56f-vbpk5" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787639 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0ca4c6e6-5e38-4798-acaf-8b2574668772-metrics-tls\") pod \"ingress-operator-5b745b69d9-jzrlt\" (UID: \"0ca4c6e6-5e38-4798-acaf-8b2574668772\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787679 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/09ace6bf-d3f8-407b-a17f-7163f94af7c7-csi-data-dir\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787694 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/279350fe-c8bb-4389-9915-666eb8694c79-signing-cabundle\") pod \"service-ca-9c57cc56f-vbpk5\" (UID: \"279350fe-c8bb-4389-9915-666eb8694c79\") " pod="openshift-service-ca/service-ca-9c57cc56f-vbpk5" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.787712 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bh4l2\" (UniqueName: \"kubernetes.io/projected/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-kube-api-access-bh4l2\") pod \"marketplace-operator-79b997595-2n9k5\" (UID: \"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae\") " pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.790933 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f-config\") pod \"kube-apiserver-operator-766d6c64bb-9stj8\" (UID: \"ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.792071 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zl8wv"] Dec 11 08:18:29 crc kubenswrapper[4881]: E1211 08:18:29.792719 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:30.292694822 +0000 UTC m=+158.670063519 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.792899 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/09ace6bf-d3f8-407b-a17f-7163f94af7c7-socket-dir\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.793779 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/09ace6bf-d3f8-407b-a17f-7163f94af7c7-plugins-dir\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.794352 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/09ace6bf-d3f8-407b-a17f-7163f94af7c7-mountpoint-dir\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.794577 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/77d994ef-53f1-4ef8-a668-38226c6c460b-config-volume\") pod \"collect-profiles-29424015-bl9jf\" (UID: \"77d994ef-53f1-4ef8-a668-38226c6c460b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.795328 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0229d25c-9b61-4e79-b8a6-47188bd5de7f-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-8p5cn\" (UID: \"0229d25c-9b61-4e79-b8a6-47188bd5de7f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8p5cn" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.795980 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1611d2d4-1d07-4ccb-aba0-2885b075dd9c-images\") pod \"machine-config-operator-74547568cd-d8zgs\" (UID: \"1611d2d4-1d07-4ccb-aba0-2885b075dd9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.796283 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/ed625b9a-3c51-472f-9210-761f7a318acd-tmpfs\") pod \"packageserver-d55dfcdfc-459c4\" (UID: \"ed625b9a-3c51-472f-9210-761f7a318acd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.797050 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e5fd3de-8162-4bf5-bc4f-e704b605108c-config-volume\") pod \"dns-default-f5gdp\" (UID: 
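Both E1211 nestedpendingoperations entries above show the kubelet parking a failed volume operation rather than retrying it inline: "durationBeforeRetry 500ms" is the initial deferral, and on repeated failures of the same operation the delay grows exponentially. A small Go sketch of that doubling schedule; the 500ms seed comes from the log, while the 2x growth factor and the roughly two-minute cap mirror upstream kubelet defaults and are stated here as an assumption:

// backoff.go: illustrate the doubling retry delay behind the
// "durationBeforeRetry 500ms" messages in the entries above.
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond        // initial deferral seen in the log
	maxDelay := 2*time.Minute + 2*time.Second // assumed cap
	for attempt := 1; attempt <= 10; attempt++ {
		fmt.Printf("attempt %d: next retry deferred by %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}

In this log the backoff never compounds: the hostpath driver registers within the first 500ms window, so the deferred mount and unmount both succeed on their next attempt.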
\"4e5fd3de-8162-4bf5-bc4f-e704b605108c\") " pod="openshift-dns/dns-default-f5gdp" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.797162 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/09ace6bf-d3f8-407b-a17f-7163f94af7c7-registration-dir\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.798024 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/279350fe-c8bb-4389-9915-666eb8694c79-signing-cabundle\") pod \"service-ca-9c57cc56f-vbpk5\" (UID: \"279350fe-c8bb-4389-9915-666eb8694c79\") " pod="openshift-service-ca/service-ca-9c57cc56f-vbpk5" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.798245 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed97acca-9dbf-4791-a11b-164ca4d74f55-config\") pod \"service-ca-operator-777779d784-w78d4\" (UID: \"ed97acca-9dbf-4791-a11b-164ca4d74f55\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-w78d4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.798660 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/09ace6bf-d3f8-407b-a17f-7163f94af7c7-csi-data-dir\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.798852 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1611d2d4-1d07-4ccb-aba0-2885b075dd9c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-d8zgs\" (UID: \"1611d2d4-1d07-4ccb-aba0-2885b075dd9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.799115 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvpp9\" (UniqueName: \"kubernetes.io/projected/3c3589de-1b86-4b24-af28-d01a548fcd82-kube-api-access-wvpp9\") pod \"machine-approver-56656f9798-mdqnb\" (UID: \"3c3589de-1b86-4b24-af28-d01a548fcd82\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.799292 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c4fd066-f527-4568-91f8-71b92b5db286-config\") pod \"kube-controller-manager-operator-78b949d7b-rsvfs\" (UID: \"5c4fd066-f527-4568-91f8-71b92b5db286\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rsvfs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.801682 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2n9k5\" (UID: \"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae\") " pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.802353 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/0ca4c6e6-5e38-4798-acaf-8b2574668772-trusted-ca\") pod \"ingress-operator-5b745b69d9-jzrlt\" (UID: \"0ca4c6e6-5e38-4798-acaf-8b2574668772\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.807421 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ed625b9a-3c51-472f-9210-761f7a318acd-webhook-cert\") pod \"packageserver-d55dfcdfc-459c4\" (UID: \"ed625b9a-3c51-472f-9210-761f7a318acd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.807673 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2n9k5\" (UID: \"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae\") " pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.812203 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/77d994ef-53f1-4ef8-a668-38226c6c460b-secret-volume\") pod \"collect-profiles-29424015-bl9jf\" (UID: \"77d994ef-53f1-4ef8-a668-38226c6c460b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.819884 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/d658247d-c8dd-49dd-9372-618a42ea566d-node-bootstrap-token\") pod \"machine-config-server-dcbjb\" (UID: \"d658247d-c8dd-49dd-9372-618a42ea566d\") " pod="openshift-machine-config-operator/machine-config-server-dcbjb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.819831 4881 generic.go:334] "Generic (PLEG): container finished" podID="1b90c971-c734-4fa4-a385-0cce5ecacbd1" containerID="4939a7dc187f962bcc8a666c835e0b3399835340c750f2ea62827f02e2c39ced" exitCode=0 Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.820257 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-jwrst" event={"ID":"1b90c971-c734-4fa4-a385-0cce5ecacbd1","Type":"ContainerDied","Data":"4939a7dc187f962bcc8a666c835e0b3399835340c750f2ea62827f02e2c39ced"} Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.820361 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-jwrst" event={"ID":"1b90c971-c734-4fa4-a385-0cce5ecacbd1","Type":"ContainerStarted","Data":"132bf96b41eb9db0423f107110517ebec47db4f7c5e8ef8032a4570b853a2ee9"} Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.820659 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-9stj8\" (UID: \"ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.821373 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/d658247d-c8dd-49dd-9372-618a42ea566d-certs\") pod \"machine-config-server-dcbjb\" (UID: \"d658247d-c8dd-49dd-9372-618a42ea566d\") " 
pod="openshift-machine-config-operator/machine-config-server-dcbjb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.821946 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfsg7\" (UniqueName: \"kubernetes.io/projected/46332c07-10b7-4cd4-abcc-af9054cfb28d-kube-api-access-bfsg7\") pod \"cluster-samples-operator-665b6dd947-nhlpk\" (UID: \"46332c07-10b7-4cd4-abcc-af9054cfb28d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nhlpk" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.822420 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-d6tb5" event={"ID":"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03","Type":"ContainerStarted","Data":"b279299bf01bc7bdacce1f90a320087c815d34f26dc2b7d78e28cb016043fc81"} Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.823742 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" event={"ID":"0b6b1eb2-a5dd-4a95-956f-a787d03f453b","Type":"ContainerStarted","Data":"55958b21c246aaf894a7b96a07fe57113b95263e997ae51a676f8267fab364fa"} Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.824860 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-tzs74" event={"ID":"26aecb96-f0ab-48d9-977c-89c3a1cf06e7","Type":"ContainerStarted","Data":"59b23d3aca9ea59668bddef3f3bf177bfb8efd905edc619bb1c006be2f960a67"} Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.824892 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-tzs74" event={"ID":"26aecb96-f0ab-48d9-977c-89c3a1cf06e7","Type":"ContainerStarted","Data":"584761de0b268886d4cb851031a3932701123aec975843b160f7d593aaefb532"} Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.825180 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-tzs74" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.826876 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a648f1f8-f077-421b-9599-95e3ef459adf-cert\") pod \"ingress-canary-kjptn\" (UID: \"a648f1f8-f077-421b-9599-95e3ef459adf\") " pod="openshift-ingress-canary/ingress-canary-kjptn" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.827045 4881 patch_prober.go:28] interesting pod/downloads-7954f5f757-tzs74 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.827099 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-tzs74" podUID="26aecb96-f0ab-48d9-977c-89c3a1cf06e7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.827187 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c4fd066-f527-4568-91f8-71b92b5db286-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-rsvfs\" (UID: \"5c4fd066-f527-4568-91f8-71b92b5db286\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rsvfs" Dec 11 08:18:29 crc kubenswrapper[4881]: 
I1211 08:18:29.827442 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3d0f98f5-f496-43cd-8b37-6f969af809d4-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-kjhrd\" (UID: \"3d0f98f5-f496-43cd-8b37-6f969af809d4\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kjhrd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.827867 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0ca4c6e6-5e38-4798-acaf-8b2574668772-metrics-tls\") pod \"ingress-operator-5b745b69d9-jzrlt\" (UID: \"0ca4c6e6-5e38-4798-acaf-8b2574668772\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.828604 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4e5fd3de-8162-4bf5-bc4f-e704b605108c-metrics-tls\") pod \"dns-default-f5gdp\" (UID: \"4e5fd3de-8162-4bf5-bc4f-e704b605108c\") " pod="openshift-dns/dns-default-f5gdp" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.828617 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/279350fe-c8bb-4389-9915-666eb8694c79-signing-key\") pod \"service-ca-9c57cc56f-vbpk5\" (UID: \"279350fe-c8bb-4389-9915-666eb8694c79\") " pod="openshift-service-ca/service-ca-9c57cc56f-vbpk5" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.829036 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed97acca-9dbf-4791-a11b-164ca4d74f55-serving-cert\") pod \"service-ca-operator-777779d784-w78d4\" (UID: \"ed97acca-9dbf-4791-a11b-164ca4d74f55\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-w78d4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.829934 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a658d244-5927-4518-b8bb-0685d0e40a07-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-dw769\" (UID: \"a658d244-5927-4518-b8bb-0685d0e40a07\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-dw769" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.830559 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/9589dff7-1c8f-4e58-b31c-b70ec577353a-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-tgcxv\" (UID: \"9589dff7-1c8f-4e58-b31c-b70ec577353a\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tgcxv" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.830655 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0229d25c-9b61-4e79-b8a6-47188bd5de7f-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-8p5cn\" (UID: \"0229d25c-9b61-4e79-b8a6-47188bd5de7f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8p5cn" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.830688 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" 
event={"ID":"fbc88e77-4757-426f-9212-8e4c3d26b8e0","Type":"ContainerStarted","Data":"2529e00c18dd5a4e1e02de975a5654ba6325ce7db6af218f77c0fb1a3f457634"} Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.830711 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" event={"ID":"fbc88e77-4757-426f-9212-8e4c3d26b8e0","Type":"ContainerStarted","Data":"a0098c4caccf1456e8cec9078f0f3ac67e557b45383e99b4108ae068dc4f527c"} Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.830724 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.830842 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ed625b9a-3c51-472f-9210-761f7a318acd-apiservice-cert\") pod \"packageserver-d55dfcdfc-459c4\" (UID: \"ed625b9a-3c51-472f-9210-761f7a318acd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.834282 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1611d2d4-1d07-4ccb-aba0-2885b075dd9c-proxy-tls\") pod \"machine-config-operator-74547568cd-d8zgs\" (UID: \"1611d2d4-1d07-4ccb-aba0-2885b075dd9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.835955 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.836449 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6j22\" (UniqueName: \"kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-kube-api-access-t6j22\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.842799 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rrjf\" (UniqueName: \"kubernetes.io/projected/c8180671-37d6-4315-a47f-bdaeef223448-kube-api-access-2rrjf\") pod \"dns-operator-744455d44c-5dbn8\" (UID: \"c8180671-37d6-4315-a47f-bdaeef223448\") " pod="openshift-dns-operator/dns-operator-744455d44c-5dbn8" Dec 11 08:18:29 crc kubenswrapper[4881]: W1211 08:18:29.855377 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod69071241_3547_43b1_bf14_5bb03184a08a.slice/crio-3786245efae87026ed9e3a675598e3bf1b6dd1710c99ef6a13862685addf9161 WatchSource:0}: Error finding container 3786245efae87026ed9e3a675598e3bf1b6dd1710c99ef6a13862685addf9161: Status 404 returned error can't find the container with id 3786245efae87026ed9e3a675598e3bf1b6dd1710c99ef6a13862685addf9161 Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.856478 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nkpq\" (UniqueName: \"kubernetes.io/projected/445199dc-20f2-4401-a687-b2a126ddcdd2-kube-api-access-6nkpq\") pod \"catalog-operator-68c6474976-9srsc\" (UID: \"445199dc-20f2-4401-a687-b2a126ddcdd2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 
08:18:29.859870 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nhlpk" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.868219 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.869471 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rncxc\" (UniqueName: \"kubernetes.io/projected/301dbf85-d5d3-48f8-8d66-5d05a2d2d22b-kube-api-access-rncxc\") pod \"router-default-5444994796-qbkd8\" (UID: \"301dbf85-d5d3-48f8-8d66-5d05a2d2d22b\") " pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.875630 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-74tpl" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.896566 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:29 crc kubenswrapper[4881]: E1211 08:18:29.899080 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:30.399066606 +0000 UTC m=+158.776435303 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.903809 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmfxs\" (UniqueName: \"kubernetes.io/projected/9b52d816-10ed-46d8-9421-678e446d568c-kube-api-access-kmfxs\") pod \"openshift-apiserver-operator-796bbdcf4f-w4j7x\" (UID: \"9b52d816-10ed-46d8-9421-678e446d568c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4j7x" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.912770 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-5dbn8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.912850 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.918237 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fg6s\" (UniqueName: \"kubernetes.io/projected/899a9cd1-026b-40e1-a698-9f9f4f7ad857-kube-api-access-9fg6s\") pod \"olm-operator-6b444d44fb-qgvl4\" (UID: \"899a9cd1-026b-40e1-a698-9f9f4f7ad857\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.923023 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.930099 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.931376 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqczv\" (UniqueName: \"kubernetes.io/projected/d118b564-5f65-4f23-aa56-42316ba80ef0-kube-api-access-hqczv\") pod \"openshift-controller-manager-operator-756b6f6bc6-hnk5n\" (UID: \"d118b564-5f65-4f23-aa56-42316ba80ef0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hnk5n" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.936828 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.954360 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdjv8\" (UniqueName: \"kubernetes.io/projected/35345934-239d-4ccb-b388-e861e51f49a6-kube-api-access-gdjv8\") pod \"etcd-operator-b45778765-r75cd\" (UID: \"35345934-239d-4ccb-b388-e861e51f49a6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.974232 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwz8"] Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.976758 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6"] Dec 11 08:18:29 crc kubenswrapper[4881]: I1211 08:18:29.995543 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7vb8\" (UniqueName: \"kubernetes.io/projected/0ca4c6e6-5e38-4798-acaf-8b2574668772-kube-api-access-p7vb8\") pod \"ingress-operator-5b745b69d9-jzrlt\" (UID: \"0ca4c6e6-5e38-4798-acaf-8b2574668772\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.000702 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:30 crc kubenswrapper[4881]: E1211 08:18:30.000893 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-12-11 08:18:30.500864857 +0000 UTC m=+158.878233554 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.001133 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:30 crc kubenswrapper[4881]: E1211 08:18:30.001423 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:30.501408461 +0000 UTC m=+158.878777158 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:30 crc kubenswrapper[4881]: W1211 08:18:30.012141 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb6c9051a_b8c5_4127_9665_b07e92e60bab.slice/crio-7533dcd46af7257c044cf94da9eabdb91820f6199d501ed0980475a5fcc1dfe1 WatchSource:0}: Error finding container 7533dcd46af7257c044cf94da9eabdb91820f6199d501ed0980475a5fcc1dfe1: Status 404 returned error can't find the container with id 7533dcd46af7257c044cf94da9eabdb91820f6199d501ed0980475a5fcc1dfe1 Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.015707 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvnrx\" (UniqueName: \"kubernetes.io/projected/1611d2d4-1d07-4ccb-aba0-2885b075dd9c-kube-api-access-lvnrx\") pod \"machine-config-operator-74547568cd-d8zgs\" (UID: \"1611d2d4-1d07-4ccb-aba0-2885b075dd9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.027463 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhqgd\" (UniqueName: \"kubernetes.io/projected/a658d244-5927-4518-b8bb-0685d0e40a07-kube-api-access-vhqgd\") pod \"control-plane-machine-set-operator-78cbb6b69f-dw769\" (UID: \"a658d244-5927-4518-b8bb-0685d0e40a07\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-dw769" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.032465 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-dw769" Dec 11 08:18:30 crc kubenswrapper[4881]: W1211 08:18:30.033377 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3c3589de_1b86_4b24_af28_d01a548fcd82.slice/crio-982a2e0428cd745b2b5f6b2b6c1b60cfa03d4dfae033c104c4644c22dbef20ff WatchSource:0}: Error finding container 982a2e0428cd745b2b5f6b2b6c1b60cfa03d4dfae033c104c4644c22dbef20ff: Status 404 returned error can't find the container with id 982a2e0428cd745b2b5f6b2b6c1b60cfa03d4dfae033c104c4644c22dbef20ff Dec 11 08:18:30 crc kubenswrapper[4881]: W1211 08:18:30.036562 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod43b9a8a1_a19d_4fb1_b9ca_5bc87c2dac43.slice/crio-fad81b105095d001df353f397c275d34fa6109bece5fea3419dde80f99b8ff05 WatchSource:0}: Error finding container fad81b105095d001df353f397c275d34fa6109bece5fea3419dde80f99b8ff05: Status 404 returned error can't find the container with id fad81b105095d001df353f397c275d34fa6109bece5fea3419dde80f99b8ff05 Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.054103 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2b9p9\" (UniqueName: \"kubernetes.io/projected/ed97acca-9dbf-4791-a11b-164ca4d74f55-kube-api-access-2b9p9\") pod \"service-ca-operator-777779d784-w78d4\" (UID: \"ed97acca-9dbf-4791-a11b-164ca4d74f55\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-w78d4" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.058642 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh"] Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.070093 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xlqz\" (UniqueName: \"kubernetes.io/projected/0229d25c-9b61-4e79-b8a6-47188bd5de7f-kube-api-access-4xlqz\") pod \"kube-storage-version-migrator-operator-b67b599dd-8p5cn\" (UID: \"0229d25c-9b61-4e79-b8a6-47188bd5de7f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8p5cn" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.091159 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84272\" (UniqueName: \"kubernetes.io/projected/ed625b9a-3c51-472f-9210-761f7a318acd-kube-api-access-84272\") pod \"packageserver-d55dfcdfc-459c4\" (UID: \"ed625b9a-3c51-472f-9210-761f7a318acd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.102477 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs"] Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.104443 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:30 crc kubenswrapper[4881]: E1211 08:18:30.104777 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:30.60474355 +0000 UTC m=+158.982112257 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.105471 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:30 crc kubenswrapper[4881]: E1211 08:18:30.107625 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:30.607607691 +0000 UTC m=+158.984976388 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.115283 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5gm2\" (UniqueName: \"kubernetes.io/projected/390d1287-ab65-447e-93a0-44beb7ec9a84-kube-api-access-d5gm2\") pod \"migrator-59844c95c7-9jj8n\" (UID: \"390d1287-ab65-447e-93a0-44beb7ec9a84\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-9jj8n" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.128257 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hnk5n" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.160130 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nhlpk"] Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.163328 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbxdj\" (UniqueName: \"kubernetes.io/projected/9589dff7-1c8f-4e58-b31c-b70ec577353a-kube-api-access-gbxdj\") pod \"package-server-manager-789f6589d5-tgcxv\" (UID: \"9589dff7-1c8f-4e58-b31c-b70ec577353a\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tgcxv" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.167397 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phj6p\" (UniqueName: \"kubernetes.io/projected/a648f1f8-f077-421b-9599-95e3ef459adf-kube-api-access-phj6p\") pod \"ingress-canary-kjptn\" (UID: \"a648f1f8-f077-421b-9599-95e3ef459adf\") " pod="openshift-ingress-canary/ingress-canary-kjptn" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.189405 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sx96d\" (UniqueName: \"kubernetes.io/projected/279350fe-c8bb-4389-9915-666eb8694c79-kube-api-access-sx96d\") pod \"service-ca-9c57cc56f-vbpk5\" (UID: \"279350fe-c8bb-4389-9915-666eb8694c79\") " pod="openshift-service-ca/service-ca-9c57cc56f-vbpk5" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.189665 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4j7x" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.192039 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8qwh\" (UniqueName: \"kubernetes.io/projected/77d994ef-53f1-4ef8-a668-38226c6c460b-kube-api-access-c8qwh\") pod \"collect-profiles-29424015-bl9jf\" (UID: \"77d994ef-53f1-4ef8-a668-38226c6c460b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.195821 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.207069 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:30 crc kubenswrapper[4881]: E1211 08:18:30.207586 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:30.707562686 +0000 UTC m=+159.084931403 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.222166 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcl8k\" (UniqueName: \"kubernetes.io/projected/09ace6bf-d3f8-407b-a17f-7163f94af7c7-kube-api-access-bcl8k\") pod \"csi-hostpathplugin-nh6jb\" (UID: \"09ace6bf-d3f8-407b-a17f-7163f94af7c7\") " pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.232110 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-9stj8\" (UID: \"ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.253288 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-td2hv"] Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.263173 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0ca4c6e6-5e38-4798-acaf-8b2574668772-bound-sa-token\") pod \"ingress-operator-5b745b69d9-jzrlt\" (UID: \"0ca4c6e6-5e38-4798-acaf-8b2574668772\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.263431 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.263848 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.278827 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-w78d4" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.287474 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mz66x\" (UniqueName: \"kubernetes.io/projected/d658247d-c8dd-49dd-9372-618a42ea566d-kube-api-access-mz66x\") pod \"machine-config-server-dcbjb\" (UID: \"d658247d-c8dd-49dd-9372-618a42ea566d\") " pod="openshift-machine-config-operator/machine-config-server-dcbjb" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.295170 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.301013 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tgcxv" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.309955 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8p5cn" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.310040 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:30 crc kubenswrapper[4881]: E1211 08:18:30.310576 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:30.810558188 +0000 UTC m=+159.187926885 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.316826 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r"] Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.319106 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" Dec 11 08:18:30 crc kubenswrapper[4881]: W1211 08:18:30.338259 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ecef833_b914_464d_a395_49bb7f66a180.slice/crio-fd2140536e87d1c3f486377a68b3a6e5a860819be2b4c4f0daf71fe60c3ad9b1 WatchSource:0}: Error finding container fd2140536e87d1c3f486377a68b3a6e5a860819be2b4c4f0daf71fe60c3ad9b1: Status 404 returned error can't find the container with id fd2140536e87d1c3f486377a68b3a6e5a860819be2b4c4f0daf71fe60c3ad9b1 Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.341607 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.349615 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-9jj8n" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.351670 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-5dbn8"] Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.352990 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccndv\" (UniqueName: \"kubernetes.io/projected/3d0f98f5-f496-43cd-8b37-6f969af809d4-kube-api-access-ccndv\") pod \"multus-admission-controller-857f4d67dd-kjhrd\" (UID: \"3d0f98f5-f496-43cd-8b37-6f969af809d4\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kjhrd" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.353184 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rs7vt\" (UniqueName: \"kubernetes.io/projected/4e5fd3de-8162-4bf5-bc4f-e704b605108c-kube-api-access-rs7vt\") pod \"dns-default-f5gdp\" (UID: \"4e5fd3de-8162-4bf5-bc4f-e704b605108c\") " pod="openshift-dns/dns-default-f5gdp" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.358050 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vbpk5" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.358991 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5c4fd066-f527-4568-91f8-71b92b5db286-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-rsvfs\" (UID: \"5c4fd066-f527-4568-91f8-71b92b5db286\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rsvfs" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.365912 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-kjptn" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.376797 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bh4l2\" (UniqueName: \"kubernetes.io/projected/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-kube-api-access-bh4l2\") pod \"marketplace-operator-79b997595-2n9k5\" (UID: \"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae\") " pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.384747 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.393035 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-f5gdp" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.396690 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc"] Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.399063 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-dcbjb" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.415359 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:30 crc kubenswrapper[4881]: E1211 08:18:30.415833 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:30.915814194 +0000 UTC m=+159.293182891 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:30 crc kubenswrapper[4881]: W1211 08:18:30.437324 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc8180671_37d6_4315_a47f_bdaeef223448.slice/crio-d437c08927477d40962dad0e1ce98481e5f6ceae3cc1d37dd3a89b9c7901de28 WatchSource:0}: Error finding container d437c08927477d40962dad0e1ce98481e5f6ceae3cc1d37dd3a89b9c7901de28: Status 404 returned error can't find the container with id d437c08927477d40962dad0e1ce98481e5f6ceae3cc1d37dd3a89b9c7901de28 Dec 11 08:18:30 crc kubenswrapper[4881]: E1211 08:18:30.519488 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:31.019470992 +0000 UTC m=+159.396839689 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.519033 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.521025 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-74tpl"] Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.534562 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4"] Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.572589 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-kjhrd" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.587650 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rsvfs" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.618189 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-dw769"] Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.622643 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:30 crc kubenswrapper[4881]: E1211 08:18:30.623704 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:31.123654801 +0000 UTC m=+159.501023508 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.625074 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.638070 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hnk5n"] Dec 11 08:18:30 crc kubenswrapper[4881]: W1211 08:18:30.707444 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod27875db6_80f0_4378_ba4d_56df53952813.slice/crio-96cd43a3e3fbbf367ca4554208356f41cb62f74f70432dcaa6f1302e46fd4c81 WatchSource:0}: Error finding container 96cd43a3e3fbbf367ca4554208356f41cb62f74f70432dcaa6f1302e46fd4c81: Status 404 returned error can't find the container with id 96cd43a3e3fbbf367ca4554208356f41cb62f74f70432dcaa6f1302e46fd4c81 Dec 11 08:18:30 crc kubenswrapper[4881]: W1211 08:18:30.717685 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda658d244_5927_4518_b8bb_0685d0e40a07.slice/crio-c82ed48bfc9711fea1b22a7d0f5c4e2bf08b459529f713f61a1a4cdc39c8dbe2 WatchSource:0}: Error finding container c82ed48bfc9711fea1b22a7d0f5c4e2bf08b459529f713f61a1a4cdc39c8dbe2: Status 404 returned error can't find the container with id c82ed48bfc9711fea1b22a7d0f5c4e2bf08b459529f713f61a1a4cdc39c8dbe2 Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.726175 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:30 crc kubenswrapper[4881]: E1211 08:18:30.727273 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:31.227253517 +0000 UTC m=+159.604622214 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.728222 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4j7x"] Dec 11 08:18:30 crc kubenswrapper[4881]: W1211 08:18:30.741752 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd118b564_5f65_4f23_aa56_42316ba80ef0.slice/crio-722e42cb42158426c8a834724d2be5c462646682360a73af9d9b7a3c2d80ecfd WatchSource:0}: Error finding container 722e42cb42158426c8a834724d2be5c462646682360a73af9d9b7a3c2d80ecfd: Status 404 returned error can't find the container with id 722e42cb42158426c8a834724d2be5c462646682360a73af9d9b7a3c2d80ecfd Dec 11 08:18:30 crc kubenswrapper[4881]: E1211 08:18:30.828374 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:31.32832063 +0000 UTC m=+159.705689327 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.829929 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.830179 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:30 crc kubenswrapper[4881]: E1211 08:18:30.830548 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:31.330531595 +0000 UTC m=+159.707900292 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.843803 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4" event={"ID":"899a9cd1-026b-40e1-a698-9f9f4f7ad857","Type":"ContainerStarted","Data":"cea522303acc06bab8730e18e6f4e685c71d8035d744abbc2e62b3e3f9717df2"} Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.846864 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwz8" event={"ID":"43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43","Type":"ContainerStarted","Data":"fad81b105095d001df353f397c275d34fa6109bece5fea3419dde80f99b8ff05"} Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.851120 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-jwrst" event={"ID":"1b90c971-c734-4fa4-a385-0cce5ecacbd1","Type":"ContainerStarted","Data":"efe6db2a9ee93c5278fdc7885515a5849ffda2353b0d8c2107d75ae8ee78d2a8"} Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.858970 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-74tpl" event={"ID":"27875db6-80f0-4378-ba4d-56df53952813","Type":"ContainerStarted","Data":"96cd43a3e3fbbf367ca4554208356f41cb62f74f70432dcaa6f1302e46fd4c81"} Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.860704 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nhlpk" event={"ID":"46332c07-10b7-4cd4-abcc-af9054cfb28d","Type":"ContainerStarted","Data":"828b83ad70f61d0f6383bc9d957ab9f2ed1d862688b2c76510b42217ad0e4124"} Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.867848 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hnk5n" event={"ID":"d118b564-5f65-4f23-aa56-42316ba80ef0","Type":"ContainerStarted","Data":"722e42cb42158426c8a834724d2be5c462646682360a73af9d9b7a3c2d80ecfd"} Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.875816 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4j7x" event={"ID":"9b52d816-10ed-46d8-9421-678e446d568c","Type":"ContainerStarted","Data":"f5ca883d4dcc75dd82ed1ec679581154b09c0d36009b01f47025b079ad4f1493"} Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.888660 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r" event={"ID":"cb02b71d-7677-477a-8068-c687ebb146ee","Type":"ContainerStarted","Data":"2db3f862aba781656d5a2c8d6646bbd9c1f2143b09134536f394a5931a630a4c"} Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.894888 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc" 
event={"ID":"445199dc-20f2-4401-a687-b2a126ddcdd2","Type":"ContainerStarted","Data":"f19aec233004c0dbb38981494881db5cf115b41ad306bf3a1af7ab09ed0c09d5"} Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.903311 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" event={"ID":"79e6349a-afe4-412b-8d64-f0875a38ccf2","Type":"ContainerStarted","Data":"d2490ecd9f106adad1d6eda903f4ac9f0f89706fa40502516a0bca8ae8674182"} Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.909291 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" event={"ID":"8ecef833-b914-464d-a395-49bb7f66a180","Type":"ContainerStarted","Data":"fd2140536e87d1c3f486377a68b3a6e5a860819be2b4c4f0daf71fe60c3ad9b1"} Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.911720 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-qbkd8" event={"ID":"301dbf85-d5d3-48f8-8d66-5d05a2d2d22b","Type":"ContainerStarted","Data":"9fa13e4fd728fc0b9558d66a2bd11057eed6877c606cdecaf44676f5994b1afb"} Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.915897 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6" event={"ID":"b6c9051a-b8c5-4127-9665-b07e92e60bab","Type":"ContainerStarted","Data":"d302faa8fbe474921cb24b12f3c818ea4633118c4776f7ca23f3599d1beb8871"} Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.915938 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6" event={"ID":"b6c9051a-b8c5-4127-9665-b07e92e60bab","Type":"ContainerStarted","Data":"7533dcd46af7257c044cf94da9eabdb91820f6199d501ed0980475a5fcc1dfe1"} Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.933855 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-dw769" event={"ID":"a658d244-5927-4518-b8bb-0685d0e40a07","Type":"ContainerStarted","Data":"c82ed48bfc9711fea1b22a7d0f5c4e2bf08b459529f713f61a1a4cdc39c8dbe2"} Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.934594 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:30 crc kubenswrapper[4881]: E1211 08:18:30.934858 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:31.434838128 +0000 UTC m=+159.812206825 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.935151    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:30 crc kubenswrapper[4881]: E1211 08:18:30.935411    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:31.435402963 +0000 UTC m=+159.812771660 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.974399    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" event={"ID":"3c3589de-1b86-4b24-af28-d01a548fcd82","Type":"ContainerStarted","Data":"b1e824f624123b7998e1b685fbbcc45bfc7e79818e4a32c8e3f76a158d3732b6"}
Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.974452    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" event={"ID":"3c3589de-1b86-4b24-af28-d01a548fcd82","Type":"ContainerStarted","Data":"982a2e0428cd745b2b5f6b2b6c1b60cfa03d4dfae033c104c4644c22dbef20ff"}
Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.996560    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" event={"ID":"317a7f01-2747-4994-8000-54613f522149","Type":"ContainerStarted","Data":"58d7d7483976135ae4372f256517c1db5701fd4c7db66c21c7c91467bcfdb9c1"}
Dec 11 08:18:30 crc kubenswrapper[4881]: I1211 08:18:30.996608    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" event={"ID":"317a7f01-2747-4994-8000-54613f522149","Type":"ContainerStarted","Data":"edfd292e6ad31da44c6945dc5412228428f94067ec0fbf0eddce1c0b98023e6b"}
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:30.999202    4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs"
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.001653    4881 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-882gs container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body=
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.001695    4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" podUID="317a7f01-2747-4994-8000-54613f522149" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused"
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.006183    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-5dbn8" event={"ID":"c8180671-37d6-4315-a47f-bdaeef223448","Type":"ContainerStarted","Data":"d437c08927477d40962dad0e1ce98481e5f6ceae3cc1d37dd3a89b9c7901de28"}
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.017064    4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-tzs74" podStartSLOduration=131.017035976 podStartE2EDuration="2m11.017035976s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:30.97624923 +0000 UTC m=+159.353617937" watchObservedRunningTime="2025-12-11 08:18:31.017035976 +0000 UTC m=+159.394404673"
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.030707    4881 patch_prober.go:28] interesting pod/downloads-7954f5f757-tzs74 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body=
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.030772    4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-tzs74" podUID="26aecb96-f0ab-48d9-977c-89c3a1cf06e7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused"
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.040390    4881 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-zl8wv container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.040439    4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" podUID="69071241-3547-43b1-bf14-5bb03184a08a" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused"
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.042568    4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.047988    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" event={"ID":"69071241-3547-43b1-bf14-5bb03184a08a","Type":"ContainerStarted","Data":"70ff0aaec8f648b2dc4d291830b5d486b1802873c485f4f53ac14e413786f6c1"}
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.048029    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" event={"ID":"69071241-3547-43b1-bf14-5bb03184a08a","Type":"ContainerStarted","Data":"3786245efae87026ed9e3a675598e3bf1b6dd1710c99ef6a13862685addf9161"}
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.048064    4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv"
Dec 11 08:18:31 crc kubenswrapper[4881]: E1211 08:18:31.049593    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:31.549568979 +0000 UTC m=+159.926937666 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.062657    4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-z9p6z" podStartSLOduration=131.062621401 podStartE2EDuration="2m11.062621401s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:31.012495484 +0000 UTC m=+159.389864201" watchObservedRunningTime="2025-12-11 08:18:31.062621401 +0000 UTC m=+159.439990098"
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.085287    4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-r75cd"]
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.148415    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:31 crc kubenswrapper[4881]: E1211 08:18:31.148744    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:31.648728615 +0000 UTC m=+160.026097312 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.254422    4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:31 crc kubenswrapper[4881]: E1211 08:18:31.255063    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:31.755036598 +0000 UTC m=+160.132405295 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.356880    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:31 crc kubenswrapper[4881]: E1211 08:18:31.358594    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:31.858573392 +0000 UTC m=+160.235942089 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.359711    4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-w78d4"]
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.459353    4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:31 crc kubenswrapper[4881]: E1211 08:18:31.459697    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:31.959680746 +0000 UTC m=+160.337049443 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.562975    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:31 crc kubenswrapper[4881]: E1211 08:18:31.564991    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:32.064970484 +0000 UTC m=+160.442339181 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.654112    4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" podStartSLOduration=131.654093073 podStartE2EDuration="2m11.654093073s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:31.653653042 +0000 UTC m=+160.031021749" watchObservedRunningTime="2025-12-11 08:18:31.654093073 +0000 UTC m=+160.031461780"
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.667375    4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:31 crc kubenswrapper[4881]: E1211 08:18:31.667544    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:32.167518324 +0000 UTC m=+160.544887021 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.668237    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:31 crc kubenswrapper[4881]: E1211 08:18:31.668698    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:32.168682243 +0000 UTC m=+160.546050940 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.769360    4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:31 crc kubenswrapper[4881]: E1211 08:18:31.769689    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:32.269672265 +0000 UTC m=+160.647040952 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.871127    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:31 crc kubenswrapper[4881]: E1211 08:18:31.871501    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:32.371489616 +0000 UTC m=+160.748858313 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.936167    4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-vk42n" podStartSLOduration=130.936116031 podStartE2EDuration="2m10.936116031s" podCreationTimestamp="2025-12-11 08:16:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:31.935566707 +0000 UTC m=+160.312935414" watchObservedRunningTime="2025-12-11 08:18:31.936116031 +0000 UTC m=+160.313484728"
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.972207    4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:31 crc kubenswrapper[4881]: E1211 08:18:31.972380    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:32.472354404 +0000 UTC m=+160.849723101 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:31 crc kubenswrapper[4881]: I1211 08:18:31.972528    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:31 crc kubenswrapper[4881]: E1211 08:18:31.972815    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:32.472802696 +0000 UTC m=+160.850171393 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.009003    4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-d6tb5" podStartSLOduration=132.008979749 podStartE2EDuration="2m12.008979749s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:31.970754265 +0000 UTC m=+160.348122972" watchObservedRunningTime="2025-12-11 08:18:32.008979749 +0000 UTC m=+160.386348446"
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.075690    4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:32 crc kubenswrapper[4881]: E1211 08:18:32.076322    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:32.57630501 +0000 UTC m=+160.953673707 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.088721    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6" event={"ID":"b6c9051a-b8c5-4127-9665-b07e92e60bab","Type":"ContainerStarted","Data":"7ad44b10d9c19b2eade5b9f31ab34e1382493c174c9276eb8eaa57e3583797c2"}
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.145981    4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vbpk5"]
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.167156    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwz8" event={"ID":"43b9a8a1-a19d-4fb1-b9ca-5bc87c2dac43","Type":"ContainerStarted","Data":"7e9d00ec2bf399f2f708fb8f53ea67ceb3f2d57e34ce5e366d816d0786eba063"}
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.180421    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:32 crc kubenswrapper[4881]: E1211 08:18:32.180779    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:32.680766686 +0000 UTC m=+161.058135383 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.183004    4881 generic.go:334] "Generic (PLEG): container finished" podID="8ecef833-b914-464d-a395-49bb7f66a180" containerID="bb14e121c74474f61827f79a79a809a619ca8dc136167d75457820e043c82e93" exitCode=0
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.183364    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" event={"ID":"8ecef833-b914-464d-a395-49bb7f66a180","Type":"ContainerDied","Data":"bb14e121c74474f61827f79a79a809a619ca8dc136167d75457820e043c82e93"}
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.185736    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-qbkd8" event={"ID":"301dbf85-d5d3-48f8-8d66-5d05a2d2d22b","Type":"ContainerStarted","Data":"72ab775fd966d543519d2dc1b97b127cd011df48ddf297dab21629cada323a00"}
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.188568    4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf"]
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.196349    4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2n9k5"]
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.209754    4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs"]
Dec 11 08:18:32 crc kubenswrapper[4881]: W1211 08:18:32.215180    4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1611d2d4_1d07_4ccb_aba0_2885b075dd9c.slice/crio-90302804b8e0d4339062709c861667df992a16b478c9b517decf6d24d72195c7 WatchSource:0}: Error finding container 90302804b8e0d4339062709c861667df992a16b478c9b517decf6d24d72195c7: Status 404 returned error can't find the container with id 90302804b8e0d4339062709c861667df992a16b478c9b517decf6d24d72195c7
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.225409    4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" podStartSLOduration=131.225390127 podStartE2EDuration="2m11.225390127s" podCreationTimestamp="2025-12-11 08:16:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:32.2170152 +0000 UTC m=+160.594383897" watchObservedRunningTime="2025-12-11 08:18:32.225390127 +0000 UTC m=+160.602758824"
Dec 11 08:18:32 crc kubenswrapper[4881]: W1211 08:18:32.230848    4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8dc5d9d6_b64d_494d_a6e6_917ed40c01ae.slice/crio-36a1d3860b1d213616d461a6d73e90db85c93508eb35e714b061c4fbafbcf49a WatchSource:0}: Error finding container 36a1d3860b1d213616d461a6d73e90db85c93508eb35e714b061c4fbafbcf49a: Status 404 returned error can't find the container with id 36a1d3860b1d213616d461a6d73e90db85c93508eb35e714b061c4fbafbcf49a
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.285313    4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:32 crc kubenswrapper[4881]: E1211 08:18:32.285538    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:32.78550972 +0000 UTC m=+161.162878437 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.286157    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:32 crc kubenswrapper[4881]: E1211 08:18:32.289302    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:32.789288344 +0000 UTC m=+161.166657041 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:32 crc kubenswrapper[4881]: W1211 08:18:32.298272    4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod77d994ef_53f1_4ef8_a668_38226c6c460b.slice/crio-35cc20635c26a0876dca7c895f441e5d6db02b033c3957b9fa19ceddbdf072ab WatchSource:0}: Error finding container 35cc20635c26a0876dca7c895f441e5d6db02b033c3957b9fa19ceddbdf072ab: Status 404 returned error can't find the container with id 35cc20635c26a0876dca7c895f441e5d6db02b033c3957b9fa19ceddbdf072ab
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.321606    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-dcbjb" event={"ID":"d658247d-c8dd-49dd-9372-618a42ea566d","Type":"ContainerStarted","Data":"198f8f917b0d6b72b818fad83adb92cf69f0f9c22fe6764df86e033549770dff"}
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.391458    4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:32 crc kubenswrapper[4881]: E1211 08:18:32.391896    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:32.891881984 +0000 UTC m=+161.269250681 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.438191    4881 generic.go:334] "Generic (PLEG): container finished" podID="79e6349a-afe4-412b-8d64-f0875a38ccf2" containerID="2260256f753932a6c86feec9b32846df774209108a484e62a0d599d3810a216a" exitCode=0
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.438313    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" event={"ID":"79e6349a-afe4-412b-8d64-f0875a38ccf2","Type":"ContainerDied","Data":"2260256f753932a6c86feec9b32846df774209108a484e62a0d599d3810a216a"}
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.439742    4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7l2x6" podStartSLOduration=131.439720815 podStartE2EDuration="2m11.439720815s" podCreationTimestamp="2025-12-11 08:16:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:32.362133711 +0000 UTC m=+160.739502408" watchObservedRunningTime="2025-12-11 08:18:32.439720815 +0000 UTC m=+160.817089512"
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.440221    4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lxwz8" podStartSLOduration=132.440212857 podStartE2EDuration="2m12.440212857s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:32.4395209 +0000 UTC m=+160.816889607" watchObservedRunningTime="2025-12-11 08:18:32.440212857 +0000 UTC m=+160.817581584"
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.452237    4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8"]
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.492768    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:32 crc kubenswrapper[4881]: E1211 08:18:32.493602    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:32.993588204 +0000 UTC m=+161.370956891 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.503893    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" event={"ID":"35345934-239d-4ccb-b388-e861e51f49a6","Type":"ContainerStarted","Data":"93fabc1b490435a1e95d9d1dbb9dd2b8d6a027d61b8ca3629b4ded818890662d"}
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.536878    4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r" podStartSLOduration=132.536852222 podStartE2EDuration="2m12.536852222s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:32.53396041 +0000 UTC m=+160.911329107" watchObservedRunningTime="2025-12-11 08:18:32.536852222 +0000 UTC m=+160.914220919"
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.536933    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-w78d4" event={"ID":"ed97acca-9dbf-4791-a11b-164ca4d74f55","Type":"ContainerStarted","Data":"0042f9a7848894bc586a8cdb55bbd4e73d8ee075447975bdfc22d425779571cd"}
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.583305    4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tgcxv"]
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.593863    4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:32 crc kubenswrapper[4881]: E1211 08:18:32.593997    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:33.0939607 +0000 UTC m=+161.471329407 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.594078    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:32 crc kubenswrapper[4881]: E1211 08:18:32.595167    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:33.095150439 +0000 UTC m=+161.472519136 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.596459    4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" podStartSLOduration=132.596435521 podStartE2EDuration="2m12.596435521s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:32.583943903 +0000 UTC m=+160.961312600" watchObservedRunningTime="2025-12-11 08:18:32.596435521 +0000 UTC m=+160.973804218"
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.596683    4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs"
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.616416    4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-9jj8n"]
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.622717    4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv"
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.635476    4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-nh6jb"]
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.672708    4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt"]
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.695078    4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:32 crc kubenswrapper[4881]: E1211 08:18:32.695560    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:33.195543286 +0000 UTC m=+161.572911983 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.696029    4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-kjptn"]
Dec 11 08:18:32 crc kubenswrapper[4881]: W1211 08:18:32.747751    4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod390d1287_ab65_447e_93a0_44beb7ec9a84.slice/crio-27f9b2d19a24e080ba9675708398dcf7def26235a0c02fed76e6145967fe6f99 WatchSource:0}: Error finding container 27f9b2d19a24e080ba9675708398dcf7def26235a0c02fed76e6145967fe6f99: Status 404 returned error can't find the container with id 27f9b2d19a24e080ba9675708398dcf7def26235a0c02fed76e6145967fe6f99
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.748359    4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-qbkd8" podStartSLOduration=132.748324549 podStartE2EDuration="2m12.748324549s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:32.748224656 +0000 UTC m=+161.125593353" watchObservedRunningTime="2025-12-11 08:18:32.748324549 +0000 UTC m=+161.125693246"
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.748753    4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-w78d4" podStartSLOduration=131.748749639 podStartE2EDuration="2m11.748749639s" podCreationTimestamp="2025-12-11 08:16:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:32.711060729 +0000 UTC m=+161.088429446" watchObservedRunningTime="2025-12-11 08:18:32.748749639 +0000 UTC m=+161.126118336"
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.764077    4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8p5cn"]
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.796641    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:32 crc kubenswrapper[4881]: E1211 08:18:32.797124    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:33.297109701 +0000 UTC m=+161.674478398 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.850468    4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4"]
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.903135    4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-f5gdp"]
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.905832    4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:32 crc kubenswrapper[4881]: E1211 08:18:32.912221    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:33.412181841 +0000 UTC m=+161.789550578 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.942166    4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-qbkd8"
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.956692    4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rsvfs"]
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.965180    4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-kjhrd"]
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.965936    4881 patch_prober.go:28] interesting pod/router-default-5444994796-qbkd8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 08:18:32 crc kubenswrapper[4881]: [-]has-synced failed: reason withheld
Dec 11 08:18:32 crc kubenswrapper[4881]: [+]process-running ok
Dec 11 08:18:32 crc kubenswrapper[4881]: healthz check failed
Dec 11 08:18:32 crc kubenswrapper[4881]: I1211 08:18:32.965996    4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbkd8" podUID="301dbf85-d5d3-48f8-8d66-5d05a2d2d22b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.007032    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:33 crc kubenswrapper[4881]: E1211 08:18:33.007386    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:33.507373619 +0000 UTC m=+161.884742316 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.108950    4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:33 crc kubenswrapper[4881]: E1211 08:18:33.109933    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:33.609910719 +0000 UTC m=+161.987279416 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.211551    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:33 crc kubenswrapper[4881]: E1211 08:18:33.212613    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:33.712591782 +0000 UTC m=+162.089960479 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.315895    4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:33 crc kubenswrapper[4881]: E1211 08:18:33.316228    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:33.816211338 +0000 UTC m=+162.193580035 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.417109    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:33 crc kubenswrapper[4881]: E1211 08:18:33.417604    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:33.917582429 +0000 UTC m=+162.294951126 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.524676    4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:33 crc kubenswrapper[4881]: E1211 08:18:33.525308    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:34.025290177 +0000 UTC m=+162.402658874 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.632756    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:33 crc kubenswrapper[4881]: E1211 08:18:33.633545    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:34.133521136 +0000 UTC m=+162.510889833 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.704233    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" event={"ID":"8ecef833-b914-464d-a395-49bb7f66a180","Type":"ContainerStarted","Data":"929952d37d6a823c5da998e5b0267c9af1b9e6a6fd4f46051f9ef2c29819de91"}
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.705454    4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv"
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.734716    4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:33 crc kubenswrapper[4881]: E1211 08:18:33.735073    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:34.235058661 +0000 UTC m=+162.612427358 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.744223    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-kjhrd" event={"ID":"3d0f98f5-f496-43cd-8b37-6f969af809d4","Type":"ContainerStarted","Data":"a590826188a275337593477892fe7ebdeb84ac43b413fca48c1fbd3a7291cf87"}
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.793392    4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" podStartSLOduration=133.79336467 podStartE2EDuration="2m13.79336467s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:33.776219147 +0000 UTC m=+162.153587854" watchObservedRunningTime="2025-12-11 08:18:33.79336467 +0000 UTC m=+162.170733377"
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.801182    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" event={"ID":"1611d2d4-1d07-4ccb-aba0-2885b075dd9c","Type":"ContainerStarted","Data":"bd3abae7513ff9902be2fb460bf160630143aec222d8ec9418ceb8582b9312e7"}
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.801240    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" event={"ID":"1611d2d4-1d07-4ccb-aba0-2885b075dd9c","Type":"ContainerStarted","Data":"90302804b8e0d4339062709c861667df992a16b478c9b517decf6d24d72195c7"}
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.832761    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-kjptn" event={"ID":"a648f1f8-f077-421b-9599-95e3ef459adf","Type":"ContainerStarted","Data":"704133403bdcc5f8aeab7bceb8225a6564291883cdad7999b5b5279be5d78db1"}
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.846304    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:33 crc kubenswrapper[4881]: E1211 08:18:33.847466    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:34.347451724 +0000 UTC m=+162.724820421 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.892783    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tgcxv" event={"ID":"9589dff7-1c8f-4e58-b31c-b70ec577353a","Type":"ContainerStarted","Data":"17ea227fcef0d3d1189d77e66995292333c1550b605b7d5d891641912ece76d7"}
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.892840    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tgcxv" event={"ID":"9589dff7-1c8f-4e58-b31c-b70ec577353a","Type":"ContainerStarted","Data":"3432fbc9cf6e9820cf136b58113965f919dc7c03d3d53ec5e105fd7cd3b6c296"}
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.910728    4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-kjptn" podStartSLOduration=6.910704495 podStartE2EDuration="6.910704495s" podCreationTimestamp="2025-12-11 08:18:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:33.893046658 +0000 UTC m=+162.270415355" watchObservedRunningTime="2025-12-11 08:18:33.910704495 +0000 UTC m=+162.288073192"
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.921982    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" event={"ID":"77d994ef-53f1-4ef8-a668-38226c6c460b","Type":"ContainerStarted","Data":"309bbeac67bb53aa0969a3e3d31bddc1e3ca5d095eb1d312a01e5c0ff16c920c"}
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.923410    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" event={"ID":"77d994ef-53f1-4ef8-a668-38226c6c460b","Type":"ContainerStarted","Data":"35cc20635c26a0876dca7c895f441e5d6db02b033c3957b9fa19ceddbdf072ab"}
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.950178    4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:33 crc kubenswrapper[4881]: E1211 08:18:33.952444    4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:34.452384063 +0000 UTC m=+162.829752900 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.954076    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-dw769" event={"ID":"a658d244-5927-4518-b8bb-0685d0e40a07","Type":"ContainerStarted","Data":"e89abc6bd30f45227b4631dd3cc7bc7a1512c088d027bbb616b934edfd737f73"}
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.968394    4881 patch_prober.go:28] interesting pod/router-default-5444994796-qbkd8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 08:18:33 crc kubenswrapper[4881]: [-]has-synced failed: reason withheld
Dec 11 08:18:33 crc kubenswrapper[4881]: [+]process-running ok
Dec 11 08:18:33 crc kubenswrapper[4881]: healthz check failed
Dec 11 08:18:33 crc kubenswrapper[4881]: I1211 08:18:33.968474    4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbkd8" podUID="301dbf85-d5d3-48f8-8d66-5d05a2d2d22b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.019507    4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" podStartSLOduration=134.019485169 podStartE2EDuration="2m14.019485169s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.011518722 +0000 UTC m=+162.388887419" watchObservedRunningTime="2025-12-11 08:18:34.019485169 +0000 UTC m=+162.396853866"
Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.045004    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" event={"ID":"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae","Type":"ContainerStarted","Data":"ccde261349a9cdfe045229c11857866097d56efdcf65bf7a367ca36cc7d89aee"}
Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.045051    4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" event={"ID":"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae","Type":"ContainerStarted","Data":"36a1d3860b1d213616d461a6d73e90db85c93508eb35e714b061c4fbafbcf49a"}
Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.045066    4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5"
Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.052918    4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec
11 08:18:34 crc kubenswrapper[4881]: E1211 08:18:34.053245 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:34.553232931 +0000 UTC m=+162.930601628 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.055283 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hnk5n" event={"ID":"d118b564-5f65-4f23-aa56-42316ba80ef0","Type":"ContainerStarted","Data":"13a136c2cde926c3076fd52c30904a7a3e3a0989956132bb5036eb8bee1911a3"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.090932 4881 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-2n9k5 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.30:8080/healthz\": dial tcp 10.217.0.30:8080: connect: connection refused" start-of-body= Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.091012 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" podUID="8dc5d9d6-b64d-494d-a6e6-917ed40c01ae" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.30:8080/healthz\": dial tcp 10.217.0.30:8080: connect: connection refused" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.095986 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-f5gdp" event={"ID":"4e5fd3de-8162-4bf5-bc4f-e704b605108c","Type":"ContainerStarted","Data":"9a0b6ffc98e1a85ef166cbff74c48320037d358f1a717b03168b5db440d3d3fd"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.156616 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4j7x" event={"ID":"9b52d816-10ed-46d8-9421-678e446d568c","Type":"ContainerStarted","Data":"c1191244072dba5f18b7643ddc9daa4f5ea3df4e01bc9b6cc35c4930eb96ba0b"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.157547 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:34 crc kubenswrapper[4881]: E1211 08:18:34.159480 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:34.659445221 +0000 UTC m=+163.036813918 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.198133 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-dw769" podStartSLOduration=133.198110315 podStartE2EDuration="2m13.198110315s" podCreationTimestamp="2025-12-11 08:16:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.110949485 +0000 UTC m=+162.488318182" watchObservedRunningTime="2025-12-11 08:18:34.198110315 +0000 UTC m=+162.575479012" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.240224 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-w78d4" event={"ID":"ed97acca-9dbf-4791-a11b-164ca4d74f55","Type":"ContainerStarted","Data":"537369b3c9bc905bb821bcaf8914a808c8e0e1140c2c768f7184f23d98822eab"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.260153 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:34 crc kubenswrapper[4881]: E1211 08:18:34.260466 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:34.760453163 +0000 UTC m=+163.137821860 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.262654 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-r546r" event={"ID":"cb02b71d-7677-477a-8068-c687ebb146ee","Type":"ContainerStarted","Data":"91dd8ae28327934bdcc72c81e3b3c5071b14d175978264514367eb2b42e259eb"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.279986 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" podStartSLOduration=133.279964635 podStartE2EDuration="2m13.279964635s" podCreationTimestamp="2025-12-11 08:16:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.279795569 +0000 UTC m=+162.657164276" watchObservedRunningTime="2025-12-11 08:18:34.279964635 +0000 UTC m=+162.657333332" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.281915 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hnk5n" podStartSLOduration=134.281906772 podStartE2EDuration="2m14.281906772s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.198870564 +0000 UTC m=+162.576239261" watchObservedRunningTime="2025-12-11 08:18:34.281906772 +0000 UTC m=+162.659275459" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.313423 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8" event={"ID":"ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f","Type":"ContainerStarted","Data":"3e7975bb1619c64c79598b78473d3369a9052cd6afbaa397b292d8437aa2655c"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.346231 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4j7x" podStartSLOduration=134.346218549 podStartE2EDuration="2m14.346218549s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.344838585 +0000 UTC m=+162.722207282" watchObservedRunningTime="2025-12-11 08:18:34.346218549 +0000 UTC m=+162.723587246" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.352214 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" event={"ID":"0ca4c6e6-5e38-4798-acaf-8b2574668772","Type":"ContainerStarted","Data":"8a46400e0846540ee6f4cd04a8a703f9695881b7efcb60273acf2e38e8622ddb"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.352300 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" 
event={"ID":"0ca4c6e6-5e38-4798-acaf-8b2574668772","Type":"ContainerStarted","Data":"4ff4080bef3f7ce874dfccdfb4e475565b818ac328e790de7cb38ad557737d1c"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.364285 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.366241 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rsvfs" event={"ID":"5c4fd066-f527-4568-91f8-71b92b5db286","Type":"ContainerStarted","Data":"5354e411695e164a5ce40ee8c245be52bed62e9b588d18572d526f5be17529fb"} Dec 11 08:18:34 crc kubenswrapper[4881]: E1211 08:18:34.366761 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:34.866735475 +0000 UTC m=+163.244104382 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.388901 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nhlpk" event={"ID":"46332c07-10b7-4cd4-abcc-af9054cfb28d","Type":"ContainerStarted","Data":"25bbbf7b40808a5f01d4f5e4dcd6ffb223a57cab7b1cd84a6c8ad6034133accc"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.388966 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nhlpk" event={"ID":"46332c07-10b7-4cd4-abcc-af9054cfb28d","Type":"ContainerStarted","Data":"fd7e5954dbaf0d208d3652429da025e9f63c720aab2ada1cde7324d9ebd36c50"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.397189 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8" podStartSLOduration=134.397172015 podStartE2EDuration="2m14.397172015s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.3896354 +0000 UTC m=+162.767004097" watchObservedRunningTime="2025-12-11 08:18:34.397172015 +0000 UTC m=+162.774540712" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.419657 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8p5cn" event={"ID":"0229d25c-9b61-4e79-b8a6-47188bd5de7f","Type":"ContainerStarted","Data":"79ed0590799a6f62f28d852b6bc147b7bc7f51cb3cd37ec4bf3f6109e1fea98a"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.435618 4881 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vbpk5" event={"ID":"279350fe-c8bb-4389-9915-666eb8694c79","Type":"ContainerStarted","Data":"4bfe4f2a9233f56676eb1e3910ae0ef341d3ed437a972cf63e1faf02c4fc70d0"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.435666 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vbpk5" event={"ID":"279350fe-c8bb-4389-9915-666eb8694c79","Type":"ContainerStarted","Data":"5f602fbc020885d42f9b6386bfc4b93216c8f5e7b897afb247e9b63b195fbe91"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.467285 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:34 crc kubenswrapper[4881]: E1211 08:18:34.487858 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:34.987840192 +0000 UTC m=+163.365208889 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.488195 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" event={"ID":"35345934-239d-4ccb-b388-e861e51f49a6","Type":"ContainerStarted","Data":"2de9da29e7f0c8681be59821edfc7d99e82a388b1db44cbb57373483c29179ec"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.502004 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-5dbn8" event={"ID":"c8180671-37d6-4315-a47f-bdaeef223448","Type":"ContainerStarted","Data":"caeaaeef1232c408c3924a7a2c84cc8b43ce5fff433bc62f2190b5aeaa95dee9"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.502086 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-5dbn8" event={"ID":"c8180671-37d6-4315-a47f-bdaeef223448","Type":"ContainerStarted","Data":"25d432fab224a12b9bf6ddc6b7eae7fafcc0e76c5a658b111c9ca9d5bc40965a"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.526214 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" podStartSLOduration=134.526188648 podStartE2EDuration="2m14.526188648s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.444643007 +0000 UTC m=+162.822011704" watchObservedRunningTime="2025-12-11 08:18:34.526188648 +0000 UTC m=+162.903557345" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.526472 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nhlpk" podStartSLOduration=134.526464096 podStartE2EDuration="2m14.526464096s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.524925528 +0000 UTC m=+162.902294225" watchObservedRunningTime="2025-12-11 08:18:34.526464096 +0000 UTC m=+162.903832783" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.528063 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4" event={"ID":"899a9cd1-026b-40e1-a698-9f9f4f7ad857","Type":"ContainerStarted","Data":"3ffad7d0fa1bfa90e0a87e5aa23d50c1ca1754987a26a55e6ad2cb23319baa96"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.530047 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.538325 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc" event={"ID":"445199dc-20f2-4401-a687-b2a126ddcdd2","Type":"ContainerStarted","Data":"41e2debe5efeadff6b66a2c6688fe2e91a808e7c2b7cfdf48a68a8f787ecf8d0"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.538641 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.543322 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-dcbjb" event={"ID":"d658247d-c8dd-49dd-9372-618a42ea566d","Type":"ContainerStarted","Data":"4f68658190512acade6c53fe72d26725bb3f663e926ce719144e6849d5cb2eba"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.556681 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-9jj8n" event={"ID":"390d1287-ab65-447e-93a0-44beb7ec9a84","Type":"ContainerStarted","Data":"27f9b2d19a24e080ba9675708398dcf7def26235a0c02fed76e6145967fe6f99"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.557615 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-r75cd" podStartSLOduration=134.557600404 podStartE2EDuration="2m14.557600404s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.556833764 +0000 UTC m=+162.934202461" watchObservedRunningTime="2025-12-11 08:18:34.557600404 +0000 UTC m=+162.934969101" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.558499 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.558702 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.568047 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:34 crc kubenswrapper[4881]: E1211 08:18:34.571280 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:35.071189429 +0000 UTC m=+163.448558126 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.571259 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-jwrst" event={"ID":"1b90c971-c734-4fa4-a385-0cce5ecacbd1","Type":"ContainerStarted","Data":"5a806916640789186d97d28ed1086eb14368ebfeb3950de71c30a7fda0a385f6"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.577911 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" event={"ID":"3c3589de-1b86-4b24-af28-d01a548fcd82","Type":"ContainerStarted","Data":"1fa0cb0b8982254508f980b4afea057a2cbba402eb24c7142c09a92cdb1b5659"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.580056 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-74tpl" event={"ID":"27875db6-80f0-4378-ba4d-56df53952813","Type":"ContainerStarted","Data":"e43dfc352cff7141ae7c57336964134fffea3c79fb53e55486c61bc05cb0ed5c"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.580775 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-74tpl" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.582909 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" event={"ID":"09ace6bf-d3f8-407b-a17f-7163f94af7c7","Type":"ContainerStarted","Data":"b7a710b86f33f9179781ccaa274eaa54d76e73602a009d7f234994045d5e952a"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.595080 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" event={"ID":"ed625b9a-3c51-472f-9210-761f7a318acd","Type":"ContainerStarted","Data":"33bc175edd541a55721999d82331fe8bdd26286ed2a4491fcaab3935a2544d70"} Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.595842 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-5dbn8" podStartSLOduration=134.595826136 podStartE2EDuration="2m14.595826136s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.592761671 +0000 UTC m=+162.970130368" watchObservedRunningTime="2025-12-11 08:18:34.595826136 +0000 UTC m=+162.973194833" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 
08:18:34.623882 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-vbpk5" podStartSLOduration=133.623851048 podStartE2EDuration="2m13.623851048s" podCreationTimestamp="2025-12-11 08:16:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.623461278 +0000 UTC m=+163.000829985" watchObservedRunningTime="2025-12-11 08:18:34.623851048 +0000 UTC m=+163.001219755" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.652777 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8p5cn" podStartSLOduration=134.652761071 podStartE2EDuration="2m14.652761071s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.64986575 +0000 UTC m=+163.027234447" watchObservedRunningTime="2025-12-11 08:18:34.652761071 +0000 UTC m=+163.030129768" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.676179 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:34 crc kubenswrapper[4881]: E1211 08:18:34.680945 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:35.180926206 +0000 UTC m=+163.558294993 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.684059 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-74tpl" podStartSLOduration=134.684030462 podStartE2EDuration="2m14.684030462s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.683249524 +0000 UTC m=+163.060618221" watchObservedRunningTime="2025-12-11 08:18:34.684030462 +0000 UTC m=+163.061399149" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.729002 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qgvl4" podStartSLOduration=133.728980691 podStartE2EDuration="2m13.728980691s" podCreationTimestamp="2025-12-11 08:16:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.704781005 +0000 UTC m=+163.082149702" watchObservedRunningTime="2025-12-11 08:18:34.728980691 +0000 UTC m=+163.106349388" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.768440 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-dcbjb" podStartSLOduration=7.768417864 podStartE2EDuration="7.768417864s" podCreationTimestamp="2025-12-11 08:18:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.73298358 +0000 UTC m=+163.110352277" watchObservedRunningTime="2025-12-11 08:18:34.768417864 +0000 UTC m=+163.145786561" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.769073 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" podStartSLOduration=134.769067391 podStartE2EDuration="2m14.769067391s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.765771 +0000 UTC m=+163.143139697" watchObservedRunningTime="2025-12-11 08:18:34.769067391 +0000 UTC m=+163.146436088" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.777593 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:34 crc kubenswrapper[4881]: E1211 08:18:34.777770 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-12-11 08:18:35.277737894 +0000 UTC m=+163.655106591 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.777935 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:34 crc kubenswrapper[4881]: E1211 08:18:34.778324 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:35.278311348 +0000 UTC m=+163.655680235 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.810954 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-jwrst" podStartSLOduration=134.810930673 podStartE2EDuration="2m14.810930673s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.809928108 +0000 UTC m=+163.187296825" watchObservedRunningTime="2025-12-11 08:18:34.810930673 +0000 UTC m=+163.188299370" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.830863 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9srsc" podStartSLOduration=133.830836494 podStartE2EDuration="2m13.830836494s" podCreationTimestamp="2025-12-11 08:16:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.830172898 +0000 UTC m=+163.207541595" watchObservedRunningTime="2025-12-11 08:18:34.830836494 +0000 UTC m=+163.208205191" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.867084 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" podStartSLOduration=133.867010987 podStartE2EDuration="2m13.867010987s" podCreationTimestamp="2025-12-11 08:16:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:34.853686678 +0000 UTC m=+163.231055375" 
watchObservedRunningTime="2025-12-11 08:18:34.867010987 +0000 UTC m=+163.244379684" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.878902 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:34 crc kubenswrapper[4881]: E1211 08:18:34.879157 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:35.379115895 +0000 UTC m=+163.756484592 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.879244 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:34 crc kubenswrapper[4881]: E1211 08:18:34.879698 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:35.37968601 +0000 UTC m=+163.757054777 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.942660 4881 patch_prober.go:28] interesting pod/router-default-5444994796-qbkd8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 08:18:34 crc kubenswrapper[4881]: [-]has-synced failed: reason withheld Dec 11 08:18:34 crc kubenswrapper[4881]: [+]process-running ok Dec 11 08:18:34 crc kubenswrapper[4881]: healthz check failed Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.942740 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbkd8" podUID="301dbf85-d5d3-48f8-8d66-5d05a2d2d22b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.984876 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:34 crc kubenswrapper[4881]: E1211 08:18:34.985086 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:35.485044249 +0000 UTC m=+163.862412946 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:34 crc kubenswrapper[4881]: I1211 08:18:34.985576 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:34 crc kubenswrapper[4881]: E1211 08:18:34.985924 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:35.48590984 +0000 UTC m=+163.863278537 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.037170 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-74tpl" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.086594 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:35 crc kubenswrapper[4881]: E1211 08:18:35.086968 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:35.586949243 +0000 UTC m=+163.964317940 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.190171 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:35 crc kubenswrapper[4881]: E1211 08:18:35.190797 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:35.690777554 +0000 UTC m=+164.068146311 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.292165 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:35 crc kubenswrapper[4881]: E1211 08:18:35.292378 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:35.79234899 +0000 UTC m=+164.169717687 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.292442 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:35 crc kubenswrapper[4881]: E1211 08:18:35.292806 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:35.792795241 +0000 UTC m=+164.170163998 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.393414 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:35 crc kubenswrapper[4881]: E1211 08:18:35.393887 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:35.893866344 +0000 UTC m=+164.271235041 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.478959 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-q7zkc"] Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.480146 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q7zkc" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.488630 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.494448 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:35 crc kubenswrapper[4881]: E1211 08:18:35.494845 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:35.994827095 +0000 UTC m=+164.372195792 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.509450 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q7zkc"] Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.597068 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.597268 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgf8x\" (UniqueName: \"kubernetes.io/projected/81795d8f-af22-4a16-92de-455b31623c53-kube-api-access-sgf8x\") pod \"community-operators-q7zkc\" (UID: \"81795d8f-af22-4a16-92de-455b31623c53\") " pod="openshift-marketplace/community-operators-q7zkc" Dec 11 08:18:35 crc kubenswrapper[4881]: E1211 08:18:35.597321 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:36.097297293 +0000 UTC m=+164.474665990 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.597368 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81795d8f-af22-4a16-92de-455b31623c53-catalog-content\") pod \"community-operators-q7zkc\" (UID: \"81795d8f-af22-4a16-92de-455b31623c53\") " pod="openshift-marketplace/community-operators-q7zkc" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.597404 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81795d8f-af22-4a16-92de-455b31623c53-utilities\") pod \"community-operators-q7zkc\" (UID: \"81795d8f-af22-4a16-92de-455b31623c53\") " pod="openshift-marketplace/community-operators-q7zkc" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.597431 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:35 crc kubenswrapper[4881]: E1211 08:18:35.597737 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:36.097725384 +0000 UTC m=+164.475094081 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.637592 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gpmrb"] Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.682038 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-f5gdp" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.682384 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-f5gdp" event={"ID":"4e5fd3de-8162-4bf5-bc4f-e704b605108c","Type":"ContainerStarted","Data":"7c5f62e90b810b23ff692b7b0f4fbbc316c1e55439ac4405165ef691be37cf7f"} Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.682401 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-f5gdp" event={"ID":"4e5fd3de-8162-4bf5-bc4f-e704b605108c","Type":"ContainerStarted","Data":"73813c1eb3ded00dec11825e223974c72eceaeed2056f5db4a83464f02a3356d"} Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.682516 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gpmrb" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.694444 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.695171 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tgcxv" event={"ID":"9589dff7-1c8f-4e58-b31c-b70ec577353a","Type":"ContainerStarted","Data":"6b84359a38bc3318d219d214b82c89c6dac114dc25c4147c1631420b1ab59497"} Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.695758 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tgcxv" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.696814 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rsvfs" event={"ID":"5c4fd066-f527-4568-91f8-71b92b5db286","Type":"ContainerStarted","Data":"a0a6a2eb9b8a9a2bcb918404d2197b0ae6e19ffefedd344d19a99d07f1e961af"} Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.697927 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.698181 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgf8x\" (UniqueName: \"kubernetes.io/projected/81795d8f-af22-4a16-92de-455b31623c53-kube-api-access-sgf8x\") pod \"community-operators-q7zkc\" (UID: \"81795d8f-af22-4a16-92de-455b31623c53\") " pod="openshift-marketplace/community-operators-q7zkc" Dec 11 08:18:35 
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.698254 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81795d8f-af22-4a16-92de-455b31623c53-utilities\") pod \"community-operators-q7zkc\" (UID: \"81795d8f-af22-4a16-92de-455b31623c53\") " pod="openshift-marketplace/community-operators-q7zkc"
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.698709 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gpmrb"]
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.698899 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81795d8f-af22-4a16-92de-455b31623c53-utilities\") pod \"community-operators-q7zkc\" (UID: \"81795d8f-af22-4a16-92de-455b31623c53\") " pod="openshift-marketplace/community-operators-q7zkc"
Dec 11 08:18:35 crc kubenswrapper[4881]: E1211 08:18:35.699055 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:36.199038093 +0000 UTC m=+164.576406790 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.699581 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81795d8f-af22-4a16-92de-455b31623c53-catalog-content\") pod \"community-operators-q7zkc\" (UID: \"81795d8f-af22-4a16-92de-455b31623c53\") " pod="openshift-marketplace/community-operators-q7zkc"
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.709143 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" event={"ID":"ed625b9a-3c51-472f-9210-761f7a318acd","Type":"ContainerStarted","Data":"dc7df96e0b38164a70769ee8de75103f5c3505ddc009d9903393707f0a898ba3"}
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.720531 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4"
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.747924 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" event={"ID":"1611d2d4-1d07-4ccb-aba0-2885b075dd9c","Type":"ContainerStarted","Data":"8b167b17937fefb8247e8559c8ba94b3745efcce6ac3a8ed040ceab4a862d3ae"}
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.750097 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8" event={"ID":"ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f","Type":"ContainerStarted","Data":"abaa730e1030ae9389eb7e33c1a7a76f22da4503bc1ce845e3cd26e66cccb8d0"}
event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9stj8" event={"ID":"ece8a23d-9b7b-4b9b-bc55-e6a1288e6d4f","Type":"ContainerStarted","Data":"abaa730e1030ae9389eb7e33c1a7a76f22da4503bc1ce845e3cd26e66cccb8d0"} Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.751888 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-jzrlt" event={"ID":"0ca4c6e6-5e38-4798-acaf-8b2574668772","Type":"ContainerStarted","Data":"ff1fbd1008457dc10347c64000b4ec1cbf40100e59864b68f04b9844b1182bc9"} Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.759048 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8p5cn" event={"ID":"0229d25c-9b61-4e79-b8a6-47188bd5de7f","Type":"ContainerStarted","Data":"aff1fb75715414cc7510de1e35904219860215443703814924448e50a992aa9c"} Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.794971 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgf8x\" (UniqueName: \"kubernetes.io/projected/81795d8f-af22-4a16-92de-455b31623c53-kube-api-access-sgf8x\") pod \"community-operators-q7zkc\" (UID: \"81795d8f-af22-4a16-92de-455b31623c53\") " pod="openshift-marketplace/community-operators-q7zkc" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.808154 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.808239 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m57fs\" (UniqueName: \"kubernetes.io/projected/5d19974b-baa0-46b1-b1ba-24411f62a0c0-kube-api-access-m57fs\") pod \"certified-operators-gpmrb\" (UID: \"5d19974b-baa0-46b1-b1ba-24411f62a0c0\") " pod="openshift-marketplace/certified-operators-gpmrb" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.808401 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d19974b-baa0-46b1-b1ba-24411f62a0c0-catalog-content\") pod \"certified-operators-gpmrb\" (UID: \"5d19974b-baa0-46b1-b1ba-24411f62a0c0\") " pod="openshift-marketplace/certified-operators-gpmrb" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.808476 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d19974b-baa0-46b1-b1ba-24411f62a0c0-utilities\") pod \"certified-operators-gpmrb\" (UID: \"5d19974b-baa0-46b1-b1ba-24411f62a0c0\") " pod="openshift-marketplace/certified-operators-gpmrb" Dec 11 08:18:35 crc kubenswrapper[4881]: E1211 08:18:35.810725 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:36.310712528 +0000 UTC m=+164.688081225 (durationBeforeRetry 500ms). 
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.824728 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-9jj8n" event={"ID":"390d1287-ab65-447e-93a0-44beb7ec9a84","Type":"ContainerStarted","Data":"ec093d2e4c06a86dcba7b802f4aa5616e750a06d7e2c4cf68ad49d91641a7e1e"}
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.824778 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-9jj8n" event={"ID":"390d1287-ab65-447e-93a0-44beb7ec9a84","Type":"ContainerStarted","Data":"0c5c74db7e0373d46d8a7edb69d7b92e83e3bdce3367a1362b23ddc646655447"}
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.827495 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-f5gdp" podStartSLOduration=8.827478952 podStartE2EDuration="8.827478952s" podCreationTimestamp="2025-12-11 08:18:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:35.732441537 +0000 UTC m=+164.109810264" watchObservedRunningTime="2025-12-11 08:18:35.827478952 +0000 UTC m=+164.204847649"
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.857886 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rxnhw"]
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.867245 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rxnhw"
Need to start a new one" pod="openshift-marketplace/community-operators-rxnhw" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.884388 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-kjhrd" event={"ID":"3d0f98f5-f496-43cd-8b37-6f969af809d4","Type":"ContainerStarted","Data":"a289804d7a8e3ed642011fd45eff953e420086edbf8b77e987384971932fddca"} Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.884437 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-kjhrd" event={"ID":"3d0f98f5-f496-43cd-8b37-6f969af809d4","Type":"ContainerStarted","Data":"f3e84509cf908e7030796dff4e7349a77750196d88815ccfbb58cc50f04987af"} Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.885724 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rsvfs" podStartSLOduration=135.885702968 podStartE2EDuration="2m15.885702968s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:35.88537712 +0000 UTC m=+164.262745817" watchObservedRunningTime="2025-12-11 08:18:35.885702968 +0000 UTC m=+164.263071655" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.886846 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rxnhw"] Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.911020 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.911231 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m57fs\" (UniqueName: \"kubernetes.io/projected/5d19974b-baa0-46b1-b1ba-24411f62a0c0-kube-api-access-m57fs\") pod \"certified-operators-gpmrb\" (UID: \"5d19974b-baa0-46b1-b1ba-24411f62a0c0\") " pod="openshift-marketplace/certified-operators-gpmrb" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.911293 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d19974b-baa0-46b1-b1ba-24411f62a0c0-catalog-content\") pod \"certified-operators-gpmrb\" (UID: \"5d19974b-baa0-46b1-b1ba-24411f62a0c0\") " pod="openshift-marketplace/certified-operators-gpmrb" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.911320 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d19974b-baa0-46b1-b1ba-24411f62a0c0-utilities\") pod \"certified-operators-gpmrb\" (UID: \"5d19974b-baa0-46b1-b1ba-24411f62a0c0\") " pod="openshift-marketplace/certified-operators-gpmrb" Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.911367 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bf5f48d-6c07-40f7-8f84-3762251f1d1d-utilities\") pod \"community-operators-rxnhw\" (UID: \"2bf5f48d-6c07-40f7-8f84-3762251f1d1d\") " pod="openshift-marketplace/community-operators-rxnhw" Dec 11 
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.911434 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgmrn\" (UniqueName: \"kubernetes.io/projected/2bf5f48d-6c07-40f7-8f84-3762251f1d1d-kube-api-access-fgmrn\") pod \"community-operators-rxnhw\" (UID: \"2bf5f48d-6c07-40f7-8f84-3762251f1d1d\") " pod="openshift-marketplace/community-operators-rxnhw"
Dec 11 08:18:35 crc kubenswrapper[4881]: E1211 08:18:35.911517 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:36.411501945 +0000 UTC m=+164.788870642 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.912098 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d19974b-baa0-46b1-b1ba-24411f62a0c0-catalog-content\") pod \"certified-operators-gpmrb\" (UID: \"5d19974b-baa0-46b1-b1ba-24411f62a0c0\") " pod="openshift-marketplace/certified-operators-gpmrb"
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.912320 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d19974b-baa0-46b1-b1ba-24411f62a0c0-utilities\") pod \"certified-operators-gpmrb\" (UID: \"5d19974b-baa0-46b1-b1ba-24411f62a0c0\") " pod="openshift-marketplace/certified-operators-gpmrb"
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.925098 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" event={"ID":"79e6349a-afe4-412b-8d64-f0875a38ccf2","Type":"ContainerStarted","Data":"c96f3ca0721ca2771895c52bcf830220de30833f5e94c73344ee2c53aa85f3ac"}
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.946582 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" event={"ID":"09ace6bf-d3f8-407b-a17f-7163f94af7c7","Type":"ContainerStarted","Data":"37cc9e7105f8a46f78d2981750021f1446d03ee3984be103f32a1dfdcd8775f8"}
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.946791 4881 patch_prober.go:28] interesting pod/router-default-5444994796-qbkd8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 08:18:35 crc kubenswrapper[4881]: [-]has-synced failed: reason withheld
Dec 11 08:18:35 crc kubenswrapper[4881]: [+]process-running ok
Dec 11 08:18:35 crc kubenswrapper[4881]: healthz check failed
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.946832 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbkd8" podUID="301dbf85-d5d3-48f8-8d66-5d05a2d2d22b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.964227 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m57fs\" (UniqueName: \"kubernetes.io/projected/5d19974b-baa0-46b1-b1ba-24411f62a0c0-kube-api-access-m57fs\") pod \"certified-operators-gpmrb\" (UID: \"5d19974b-baa0-46b1-b1ba-24411f62a0c0\") " pod="openshift-marketplace/certified-operators-gpmrb"
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.972818 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-kjptn" event={"ID":"a648f1f8-f077-421b-9599-95e3ef459adf","Type":"ContainerStarted","Data":"753668c7976fb5b6228329e908376757a6243274a6a19a0982c7eb2e92718aed"}
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.974292 4881 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-2n9k5 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.30:8080/healthz\": dial tcp 10.217.0.30:8080: connect: connection refused" start-of-body=
Dec 11 08:18:35 crc kubenswrapper[4881]: I1211 08:18:35.974354 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" podUID="8dc5d9d6-b64d-494d-a6e6-917ed40c01ae" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.30:8080/healthz\": dial tcp 10.217.0.30:8080: connect: connection refused"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.015818 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gpmrb"
Need to start a new one" pod="openshift-marketplace/certified-operators-gpmrb" Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.035478 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bf5f48d-6c07-40f7-8f84-3762251f1d1d-utilities\") pod \"community-operators-rxnhw\" (UID: \"2bf5f48d-6c07-40f7-8f84-3762251f1d1d\") " pod="openshift-marketplace/community-operators-rxnhw" Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.035615 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.035651 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bf5f48d-6c07-40f7-8f84-3762251f1d1d-catalog-content\") pod \"community-operators-rxnhw\" (UID: \"2bf5f48d-6c07-40f7-8f84-3762251f1d1d\") " pod="openshift-marketplace/community-operators-rxnhw" Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.035769 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgmrn\" (UniqueName: \"kubernetes.io/projected/2bf5f48d-6c07-40f7-8f84-3762251f1d1d-kube-api-access-fgmrn\") pod \"community-operators-rxnhw\" (UID: \"2bf5f48d-6c07-40f7-8f84-3762251f1d1d\") " pod="openshift-marketplace/community-operators-rxnhw" Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.048136 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bf5f48d-6c07-40f7-8f84-3762251f1d1d-utilities\") pod \"community-operators-rxnhw\" (UID: \"2bf5f48d-6c07-40f7-8f84-3762251f1d1d\") " pod="openshift-marketplace/community-operators-rxnhw" Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.049294 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bf5f48d-6c07-40f7-8f84-3762251f1d1d-catalog-content\") pod \"community-operators-rxnhw\" (UID: \"2bf5f48d-6c07-40f7-8f84-3762251f1d1d\") " pod="openshift-marketplace/community-operators-rxnhw" Dec 11 08:18:36 crc kubenswrapper[4881]: E1211 08:18:36.050103 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:36.550086894 +0000 UTC m=+164.927455591 (durationBeforeRetry 500ms). 
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.065446 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-kjhrd" podStartSLOduration=135.065428042 podStartE2EDuration="2m15.065428042s" podCreationTimestamp="2025-12-11 08:16:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:36.065018952 +0000 UTC m=+164.442387649" watchObservedRunningTime="2025-12-11 08:18:36.065428042 +0000 UTC m=+164.442796739"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.068285 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tgcxv" podStartSLOduration=135.068275633 podStartE2EDuration="2m15.068275633s" podCreationTimestamp="2025-12-11 08:16:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:35.97090034 +0000 UTC m=+164.348269037" watchObservedRunningTime="2025-12-11 08:18:36.068275633 +0000 UTC m=+164.445644330"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.076356 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-q4t54"]
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.077381 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q4t54"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.099718 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q7zkc"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.136511 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.136763 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5433ad4c-65f7-4ec1-8a01-f358f14685c4-utilities\") pod \"certified-operators-q4t54\" (UID: \"5433ad4c-65f7-4ec1-8a01-f358f14685c4\") " pod="openshift-marketplace/certified-operators-q4t54"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.136855 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5433ad4c-65f7-4ec1-8a01-f358f14685c4-catalog-content\") pod \"certified-operators-q4t54\" (UID: \"5433ad4c-65f7-4ec1-8a01-f358f14685c4\") " pod="openshift-marketplace/certified-operators-q4t54"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.136880 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rldfw\" (UniqueName: \"kubernetes.io/projected/5433ad4c-65f7-4ec1-8a01-f358f14685c4-kube-api-access-rldfw\") pod \"certified-operators-q4t54\" (UID: \"5433ad4c-65f7-4ec1-8a01-f358f14685c4\") " pod="openshift-marketplace/certified-operators-q4t54"
Dec 11 08:18:36 crc kubenswrapper[4881]: E1211 08:18:36.136991 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:36.636975767 +0000 UTC m=+165.014344464 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.165268 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgmrn\" (UniqueName: \"kubernetes.io/projected/2bf5f48d-6c07-40f7-8f84-3762251f1d1d-kube-api-access-fgmrn\") pod \"community-operators-rxnhw\" (UID: \"2bf5f48d-6c07-40f7-8f84-3762251f1d1d\") " pod="openshift-marketplace/community-operators-rxnhw"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.170136 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" podStartSLOduration=135.170110925 podStartE2EDuration="2m15.170110925s" podCreationTimestamp="2025-12-11 08:16:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:36.157089303 +0000 UTC m=+164.534458000" watchObservedRunningTime="2025-12-11 08:18:36.170110925 +0000 UTC m=+164.547479622"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.170500 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q4t54"]
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.201608 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rxnhw"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.243008 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5433ad4c-65f7-4ec1-8a01-f358f14685c4-utilities\") pod \"certified-operators-q4t54\" (UID: \"5433ad4c-65f7-4ec1-8a01-f358f14685c4\") " pod="openshift-marketplace/certified-operators-q4t54"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.243090 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.243111 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5433ad4c-65f7-4ec1-8a01-f358f14685c4-catalog-content\") pod \"certified-operators-q4t54\" (UID: \"5433ad4c-65f7-4ec1-8a01-f358f14685c4\") " pod="openshift-marketplace/certified-operators-q4t54"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.243130 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rldfw\" (UniqueName: \"kubernetes.io/projected/5433ad4c-65f7-4ec1-8a01-f358f14685c4-kube-api-access-rldfw\") pod \"certified-operators-q4t54\" (UID: \"5433ad4c-65f7-4ec1-8a01-f358f14685c4\") " pod="openshift-marketplace/certified-operators-q4t54"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.243756 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5433ad4c-65f7-4ec1-8a01-f358f14685c4-utilities\") pod \"certified-operators-q4t54\" (UID: \"5433ad4c-65f7-4ec1-8a01-f358f14685c4\") " pod="openshift-marketplace/certified-operators-q4t54"
"MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5433ad4c-65f7-4ec1-8a01-f358f14685c4-utilities\") pod \"certified-operators-q4t54\" (UID: \"5433ad4c-65f7-4ec1-8a01-f358f14685c4\") " pod="openshift-marketplace/certified-operators-q4t54" Dec 11 08:18:36 crc kubenswrapper[4881]: E1211 08:18:36.243988 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:36.743976807 +0000 UTC m=+165.121345504 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.244305 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5433ad4c-65f7-4ec1-8a01-f358f14685c4-catalog-content\") pod \"certified-operators-q4t54\" (UID: \"5433ad4c-65f7-4ec1-8a01-f358f14685c4\") " pod="openshift-marketplace/certified-operators-q4t54" Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.255429 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d8zgs" podStartSLOduration=135.255410319 podStartE2EDuration="2m15.255410319s" podCreationTimestamp="2025-12-11 08:16:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:36.212728176 +0000 UTC m=+164.590096873" watchObservedRunningTime="2025-12-11 08:18:36.255410319 +0000 UTC m=+164.632779006" Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.255967 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-9jj8n" podStartSLOduration=136.255963663 podStartE2EDuration="2m16.255963663s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:36.253763908 +0000 UTC m=+164.631132605" watchObservedRunningTime="2025-12-11 08:18:36.255963663 +0000 UTC m=+164.633332360" Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.292951 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rldfw\" (UniqueName: \"kubernetes.io/projected/5433ad4c-65f7-4ec1-8a01-f358f14685c4-kube-api-access-rldfw\") pod \"certified-operators-q4t54\" (UID: \"5433ad4c-65f7-4ec1-8a01-f358f14685c4\") " pod="openshift-marketplace/certified-operators-q4t54" Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.350830 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:36 crc kubenswrapper[4881]: E1211 
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.432141 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q4t54"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.451943 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:36 crc kubenswrapper[4881]: E1211 08:18:36.452311 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:36.952296266 +0000 UTC m=+165.329664963 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.554230 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:36 crc kubenswrapper[4881]: E1211 08:18:36.554427 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:37.054401735 +0000 UTC m=+165.431770432 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.554606 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:36 crc kubenswrapper[4881]: E1211 08:18:36.554963 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:37.054951848 +0000 UTC m=+165.432320585 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.599533 4881 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-td2hv container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.19:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.599648 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" podUID="8ecef833-b914-464d-a395-49bb7f66a180" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.19:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.605551 4881 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-td2hv container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.19:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.605660 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" podUID="8ecef833-b914-464d-a395-49bb7f66a180" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.19:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.655990 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
(UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:36 crc kubenswrapper[4881]: E1211 08:18:36.656436 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:37.156420082 +0000 UTC m=+165.533788779 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.720453 4881 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-459c4 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.39:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.720530 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4" podUID="ed625b9a-3c51-472f-9210-761f7a318acd" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.39:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.764733 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:36 crc kubenswrapper[4881]: E1211 08:18:36.765969 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:37.265942594 +0000 UTC m=+165.643311291 (durationBeforeRetry 500ms). 
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.773694 4881 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-td2hv container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Dec 11 08:18:36 crc kubenswrapper[4881]: [+]log ok
Dec 11 08:18:36 crc kubenswrapper[4881]: [-]poststarthook/max-in-flight-filter failed: reason withheld
Dec 11 08:18:36 crc kubenswrapper[4881]: [-]poststarthook/storage-object-count-tracker-hook failed: reason withheld
Dec 11 08:18:36 crc kubenswrapper[4881]: healthz check failed
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.773735 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" podUID="8ecef833-b914-464d-a395-49bb7f66a180" containerName="openshift-config-operator" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.866965 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:36 crc kubenswrapper[4881]: E1211 08:18:36.867374 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:37.367356717 +0000 UTC m=+165.744725414 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.940157 4881 patch_prober.go:28] interesting pod/router-default-5444994796-qbkd8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 08:18:36 crc kubenswrapper[4881]: [-]has-synced failed: reason withheld
Dec 11 08:18:36 crc kubenswrapper[4881]: [+]process-running ok
Dec 11 08:18:36 crc kubenswrapper[4881]: healthz check failed
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.940539 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbkd8" podUID="301dbf85-d5d3-48f8-8d66-5d05a2d2d22b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.969212 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:36 crc kubenswrapper[4881]: E1211 08:18:36.969630 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:37.469614779 +0000 UTC m=+165.846983476 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:36 crc kubenswrapper[4881]: I1211 08:18:36.999275 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" event={"ID":"09ace6bf-d3f8-407b-a17f-7163f94af7c7","Type":"ContainerStarted","Data":"ab222d35ae2b6b1167b682cd6f0ae3bc6a4d02354b4079acde8f1f959d447220"}
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.014289 4881 generic.go:334] "Generic (PLEG): container finished" podID="77d994ef-53f1-4ef8-a668-38226c6c460b" containerID="309bbeac67bb53aa0969a3e3d31bddc1e3ca5d095eb1d312a01e5c0ff16c920c" exitCode=0
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.036472 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" event={"ID":"77d994ef-53f1-4ef8-a668-38226c6c460b","Type":"ContainerDied","Data":"309bbeac67bb53aa0969a3e3d31bddc1e3ca5d095eb1d312a01e5c0ff16c920c"}
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.036538 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-459c4"
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.055031 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv"
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.071262 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:37 crc kubenswrapper[4881]: E1211 08:18:37.071567 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:37.571549903 +0000 UTC m=+165.948918600 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.173088 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:37 crc kubenswrapper[4881]: E1211 08:18:37.176666 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:37.676654686 +0000 UTC m=+166.054023373 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.276000 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:37 crc kubenswrapper[4881]: E1211 08:18:37.276444 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:37.776421268 +0000 UTC m=+166.153789965 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.377369 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:37 crc kubenswrapper[4881]: E1211 08:18:37.378166 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:37.878124146 +0000 UTC m=+166.255492843 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.405770 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rxnhw"]
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.428751 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-j8gln"]
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.429738 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j8gln"
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.432395 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.445544 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gpmrb"]
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.451870 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j8gln"]
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.479588 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 08:18:37 crc kubenswrapper[4881]: E1211 08:18:37.480102 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:37.980076992 +0000 UTC m=+166.357445689 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.539628 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q4t54"]
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.569948 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q7zkc"]
Dec 11 08:18:37 crc kubenswrapper[4881]: E1211 08:18:37.583294 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:38.083272978 +0000 UTC m=+166.460641675 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.582800 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8"
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.583638 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlkx2\" (UniqueName: \"kubernetes.io/projected/1112a12e-84b7-447f-8c3c-a2bca7fc2096-kube-api-access-nlkx2\") pod \"redhat-marketplace-j8gln\" (UID: \"1112a12e-84b7-447f-8c3c-a2bca7fc2096\") " pod="openshift-marketplace/redhat-marketplace-j8gln"
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.583730 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1112a12e-84b7-447f-8c3c-a2bca7fc2096-utilities\") pod \"redhat-marketplace-j8gln\" (UID: \"1112a12e-84b7-447f-8c3c-a2bca7fc2096\") " pod="openshift-marketplace/redhat-marketplace-j8gln"
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.583778 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1112a12e-84b7-447f-8c3c-a2bca7fc2096-catalog-content\") pod \"redhat-marketplace-j8gln\" (UID: \"1112a12e-84b7-447f-8c3c-a2bca7fc2096\") " pod="openshift-marketplace/redhat-marketplace-j8gln"
Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.685698 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:37 crc kubenswrapper[4881]: E1211 08:18:37.685860 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:38.185832838 +0000 UTC m=+166.563201535 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.686041 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlkx2\" (UniqueName: \"kubernetes.io/projected/1112a12e-84b7-447f-8c3c-a2bca7fc2096-kube-api-access-nlkx2\") pod \"redhat-marketplace-j8gln\" (UID: \"1112a12e-84b7-447f-8c3c-a2bca7fc2096\") " pod="openshift-marketplace/redhat-marketplace-j8gln" Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.686110 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1112a12e-84b7-447f-8c3c-a2bca7fc2096-utilities\") pod \"redhat-marketplace-j8gln\" (UID: \"1112a12e-84b7-447f-8c3c-a2bca7fc2096\") " pod="openshift-marketplace/redhat-marketplace-j8gln" Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.686142 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1112a12e-84b7-447f-8c3c-a2bca7fc2096-catalog-content\") pod \"redhat-marketplace-j8gln\" (UID: \"1112a12e-84b7-447f-8c3c-a2bca7fc2096\") " pod="openshift-marketplace/redhat-marketplace-j8gln" Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.686201 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:37 crc kubenswrapper[4881]: E1211 08:18:37.686698 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:38.186681969 +0000 UTC m=+166.564050666 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.687400 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1112a12e-84b7-447f-8c3c-a2bca7fc2096-catalog-content\") pod \"redhat-marketplace-j8gln\" (UID: \"1112a12e-84b7-447f-8c3c-a2bca7fc2096\") " pod="openshift-marketplace/redhat-marketplace-j8gln" Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.687618 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1112a12e-84b7-447f-8c3c-a2bca7fc2096-utilities\") pod \"redhat-marketplace-j8gln\" (UID: \"1112a12e-84b7-447f-8c3c-a2bca7fc2096\") " pod="openshift-marketplace/redhat-marketplace-j8gln" Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.771273 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlkx2\" (UniqueName: \"kubernetes.io/projected/1112a12e-84b7-447f-8c3c-a2bca7fc2096-kube-api-access-nlkx2\") pod \"redhat-marketplace-j8gln\" (UID: \"1112a12e-84b7-447f-8c3c-a2bca7fc2096\") " pod="openshift-marketplace/redhat-marketplace-j8gln" Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.787430 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:37 crc kubenswrapper[4881]: E1211 08:18:37.787845 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:38.287820964 +0000 UTC m=+166.665189661 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.804059 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-gt5pp"] Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.806946 4881 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.808598 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gt5pp" Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.847971 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gt5pp"] Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.890436 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lr6fl\" (UniqueName: \"kubernetes.io/projected/054e2f3b-99d6-4586-8f79-bb4dadd92742-kube-api-access-lr6fl\") pod \"redhat-marketplace-gt5pp\" (UID: \"054e2f3b-99d6-4586-8f79-bb4dadd92742\") " pod="openshift-marketplace/redhat-marketplace-gt5pp" Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.890480 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/054e2f3b-99d6-4586-8f79-bb4dadd92742-catalog-content\") pod \"redhat-marketplace-gt5pp\" (UID: \"054e2f3b-99d6-4586-8f79-bb4dadd92742\") " pod="openshift-marketplace/redhat-marketplace-gt5pp" Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.890510 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.890533 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/054e2f3b-99d6-4586-8f79-bb4dadd92742-utilities\") pod \"redhat-marketplace-gt5pp\" (UID: \"054e2f3b-99d6-4586-8f79-bb4dadd92742\") " pod="openshift-marketplace/redhat-marketplace-gt5pp" Dec 11 08:18:37 crc kubenswrapper[4881]: E1211 08:18:37.890828 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:38.390815885 +0000 UTC m=+166.768184582 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.939179 4881 patch_prober.go:28] interesting pod/router-default-5444994796-qbkd8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 08:18:37 crc kubenswrapper[4881]: [-]has-synced failed: reason withheld Dec 11 08:18:37 crc kubenswrapper[4881]: [+]process-running ok Dec 11 08:18:37 crc kubenswrapper[4881]: healthz check failed Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.939276 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbkd8" podUID="301dbf85-d5d3-48f8-8d66-5d05a2d2d22b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.991856 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:37 crc kubenswrapper[4881]: E1211 08:18:37.992059 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:38.492043463 +0000 UTC m=+166.869412160 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.992098 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/054e2f3b-99d6-4586-8f79-bb4dadd92742-utilities\") pod \"redhat-marketplace-gt5pp\" (UID: \"054e2f3b-99d6-4586-8f79-bb4dadd92742\") " pod="openshift-marketplace/redhat-marketplace-gt5pp" Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.992178 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lr6fl\" (UniqueName: \"kubernetes.io/projected/054e2f3b-99d6-4586-8f79-bb4dadd92742-kube-api-access-lr6fl\") pod \"redhat-marketplace-gt5pp\" (UID: \"054e2f3b-99d6-4586-8f79-bb4dadd92742\") " pod="openshift-marketplace/redhat-marketplace-gt5pp" Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.992205 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/054e2f3b-99d6-4586-8f79-bb4dadd92742-catalog-content\") pod \"redhat-marketplace-gt5pp\" (UID: \"054e2f3b-99d6-4586-8f79-bb4dadd92742\") " pod="openshift-marketplace/redhat-marketplace-gt5pp" Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.992700 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/054e2f3b-99d6-4586-8f79-bb4dadd92742-catalog-content\") pod \"redhat-marketplace-gt5pp\" (UID: \"054e2f3b-99d6-4586-8f79-bb4dadd92742\") " pod="openshift-marketplace/redhat-marketplace-gt5pp" Dec 11 08:18:37 crc kubenswrapper[4881]: I1211 08:18:37.992899 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/054e2f3b-99d6-4586-8f79-bb4dadd92742-utilities\") pod \"redhat-marketplace-gt5pp\" (UID: \"054e2f3b-99d6-4586-8f79-bb4dadd92742\") " pod="openshift-marketplace/redhat-marketplace-gt5pp" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.019583 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7zkc" event={"ID":"81795d8f-af22-4a16-92de-455b31623c53","Type":"ContainerStarted","Data":"16b8e5d358ce41e4e664a5d3faba09a3f5ef0d158c2f6f0eb86c760afe724cc5"} Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.020451 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lr6fl\" (UniqueName: \"kubernetes.io/projected/054e2f3b-99d6-4586-8f79-bb4dadd92742-kube-api-access-lr6fl\") pod \"redhat-marketplace-gt5pp\" (UID: \"054e2f3b-99d6-4586-8f79-bb4dadd92742\") " pod="openshift-marketplace/redhat-marketplace-gt5pp" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.020828 4881 generic.go:334] "Generic (PLEG): container finished" podID="5d19974b-baa0-46b1-b1ba-24411f62a0c0" containerID="a326aa201312b4fabea8b77148ca66f4d59bca4460992165e7a09c88658301de" exitCode=0 Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.020878 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gpmrb" 
event={"ID":"5d19974b-baa0-46b1-b1ba-24411f62a0c0","Type":"ContainerDied","Data":"a326aa201312b4fabea8b77148ca66f4d59bca4460992165e7a09c88658301de"} Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.020932 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gpmrb" event={"ID":"5d19974b-baa0-46b1-b1ba-24411f62a0c0","Type":"ContainerStarted","Data":"1d4aea663f5827b2b13a3a4cc84955d33886f5755c349fab0abe4bd7c61c313a"} Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.022709 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.025039 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" event={"ID":"09ace6bf-d3f8-407b-a17f-7163f94af7c7","Type":"ContainerStarted","Data":"dc31823c27cdc727f4109f8705e3e8ba7ae8fb51e5c71d0671b8353fbec1dd87"} Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.025084 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" event={"ID":"09ace6bf-d3f8-407b-a17f-7163f94af7c7","Type":"ContainerStarted","Data":"7a6152e1b8b1522f80aef5503e36fa0864fb73dfc58fde002aec33eaf569244d"} Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.027972 4881 generic.go:334] "Generic (PLEG): container finished" podID="2bf5f48d-6c07-40f7-8f84-3762251f1d1d" containerID="929789db30077f0eb0ce126eb142540875c036c0d5b5c1300b4cac126086e838" exitCode=0 Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.028038 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rxnhw" event={"ID":"2bf5f48d-6c07-40f7-8f84-3762251f1d1d","Type":"ContainerDied","Data":"929789db30077f0eb0ce126eb142540875c036c0d5b5c1300b4cac126086e838"} Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.028068 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rxnhw" event={"ID":"2bf5f48d-6c07-40f7-8f84-3762251f1d1d","Type":"ContainerStarted","Data":"1c4a12c799a042b030d39ac21d0828dc69e760c0f990b35e6c57c6a4e88df14e"} Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.031668 4881 generic.go:334] "Generic (PLEG): container finished" podID="5433ad4c-65f7-4ec1-8a01-f358f14685c4" containerID="bdbf2d0c70c3492727076c85071a800b9bba83c72833d54d76bb7ffe0135f725" exitCode=0 Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.031758 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q4t54" event={"ID":"5433ad4c-65f7-4ec1-8a01-f358f14685c4","Type":"ContainerDied","Data":"bdbf2d0c70c3492727076c85071a800b9bba83c72833d54d76bb7ffe0135f725"} Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.031815 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q4t54" event={"ID":"5433ad4c-65f7-4ec1-8a01-f358f14685c4","Type":"ContainerStarted","Data":"570286220aeadb218374f797d6cc5651539f3eda419c458fc3e7d8dceb769840"} Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.061404 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-nh6jb" podStartSLOduration=11.061382173 podStartE2EDuration="11.061382173s" podCreationTimestamp="2025-12-11 08:18:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:38.061068055 +0000 
UTC m=+166.438436772" watchObservedRunningTime="2025-12-11 08:18:38.061382173 +0000 UTC m=+166.438750870" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.064738 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j8gln" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.093298 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:38 crc kubenswrapper[4881]: E1211 08:18:38.093797 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:38.593781042 +0000 UTC m=+166.971149739 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.154116 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gt5pp" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.194890 4881 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-12-11T08:18:37.806975807Z","Handler":null,"Name":""} Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.195400 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:38 crc kubenswrapper[4881]: E1211 08:18:38.195577 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 08:18:38.695544193 +0000 UTC m=+167.072912890 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.195726 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:38 crc kubenswrapper[4881]: E1211 08:18:38.195981 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 08:18:38.695973293 +0000 UTC m=+167.073341990 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zn9v8" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.198160 4881 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.198189 4881 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.296949 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.302314 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.340060 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.395685 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gt5pp"] Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.398837 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c8qwh\" (UniqueName: \"kubernetes.io/projected/77d994ef-53f1-4ef8-a668-38226c6c460b-kube-api-access-c8qwh\") pod \"77d994ef-53f1-4ef8-a668-38226c6c460b\" (UID: \"77d994ef-53f1-4ef8-a668-38226c6c460b\") " Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.398975 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/77d994ef-53f1-4ef8-a668-38226c6c460b-config-volume\") pod \"77d994ef-53f1-4ef8-a668-38226c6c460b\" (UID: \"77d994ef-53f1-4ef8-a668-38226c6c460b\") " Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.399052 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/77d994ef-53f1-4ef8-a668-38226c6c460b-secret-volume\") pod \"77d994ef-53f1-4ef8-a668-38226c6c460b\" (UID: \"77d994ef-53f1-4ef8-a668-38226c6c460b\") " Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.399207 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.399615 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77d994ef-53f1-4ef8-a668-38226c6c460b-config-volume" (OuterVolumeSpecName: "config-volume") pod "77d994ef-53f1-4ef8-a668-38226c6c460b" (UID: "77d994ef-53f1-4ef8-a668-38226c6c460b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:18:38 crc kubenswrapper[4881]: W1211 08:18:38.400188 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod054e2f3b_99d6_4586_8f79_bb4dadd92742.slice/crio-8b21081b2e2ebac1f979d6181e46060858c62362c866252d066801be4f23fa16 WatchSource:0}: Error finding container 8b21081b2e2ebac1f979d6181e46060858c62362c866252d066801be4f23fa16: Status 404 returned error can't find the container with id 8b21081b2e2ebac1f979d6181e46060858c62362c866252d066801be4f23fa16 Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.401808 4881 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.401832 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.404537 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77d994ef-53f1-4ef8-a668-38226c6c460b-kube-api-access-c8qwh" (OuterVolumeSpecName: "kube-api-access-c8qwh") pod "77d994ef-53f1-4ef8-a668-38226c6c460b" (UID: "77d994ef-53f1-4ef8-a668-38226c6c460b"). InnerVolumeSpecName "kube-api-access-c8qwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.404935 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77d994ef-53f1-4ef8-a668-38226c6c460b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "77d994ef-53f1-4ef8-a668-38226c6c460b" (UID: "77d994ef-53f1-4ef8-a668-38226c6c460b"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.438900 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zn9v8\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.478357 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.478422 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.480360 4881 patch_prober.go:28] interesting pod/console-f9d7485db-d6tb5 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.12:8443/health\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.480438 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-d6tb5" podUID="09b82983-c4d5-4c1f-8f41-9dcc20fbfd03" containerName="console" probeResult="failure" output="Get \"https://10.217.0.12:8443/health\": dial tcp 10.217.0.12:8443: connect: connection refused" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.500281 4881 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/77d994ef-53f1-4ef8-a668-38226c6c460b-config-volume\") on node \"crc\" DevicePath \"\"" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.500319 4881 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/77d994ef-53f1-4ef8-a668-38226c6c460b-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 11 
08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.500344 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c8qwh\" (UniqueName: \"kubernetes.io/projected/77d994ef-53f1-4ef8-a668-38226c6c460b-kube-api-access-c8qwh\") on node \"crc\" DevicePath \"\"" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.526236 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j8gln"] Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.537832 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:38 crc kubenswrapper[4881]: W1211 08:18:38.577977 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1112a12e_84b7_447f_8c3c_a2bca7fc2096.slice/crio-6d194f80a882b19724bccc2483a0a98e7027463c1cb7ef79f9a8a93e10775179 WatchSource:0}: Error finding container 6d194f80a882b19724bccc2483a0a98e7027463c1cb7ef79f9a8a93e10775179: Status 404 returned error can't find the container with id 6d194f80a882b19724bccc2483a0a98e7027463c1cb7ef79f9a8a93e10775179 Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.690790 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.690872 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.704116 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.715724 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zn9v8"] Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.807641 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-46vfl"] Dec 11 08:18:38 crc kubenswrapper[4881]: E1211 08:18:38.807850 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77d994ef-53f1-4ef8-a668-38226c6c460b" containerName="collect-profiles" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.807886 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="77d994ef-53f1-4ef8-a668-38226c6c460b" containerName="collect-profiles" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.808014 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="77d994ef-53f1-4ef8-a668-38226c6c460b" containerName="collect-profiles" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.808756 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-46vfl" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.811347 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.826223 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-46vfl"] Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.839205 4881 patch_prober.go:28] interesting pod/downloads-7954f5f757-tzs74 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.839265 4881 patch_prober.go:28] interesting pod/downloads-7954f5f757-tzs74 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.839314 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-tzs74" podUID="26aecb96-f0ab-48d9-977c-89c3a1cf06e7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.839265 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-tzs74" podUID="26aecb96-f0ab-48d9-977c-89c3a1cf06e7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.908146 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcd451b4-9860-4079-9d8a-eab5bb3365c8-catalog-content\") pod \"redhat-operators-46vfl\" (UID: \"bcd451b4-9860-4079-9d8a-eab5bb3365c8\") " pod="openshift-marketplace/redhat-operators-46vfl" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.908195 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcd451b4-9860-4079-9d8a-eab5bb3365c8-utilities\") pod \"redhat-operators-46vfl\" (UID: \"bcd451b4-9860-4079-9d8a-eab5bb3365c8\") " pod="openshift-marketplace/redhat-operators-46vfl" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.908315 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxp8c\" (UniqueName: \"kubernetes.io/projected/bcd451b4-9860-4079-9d8a-eab5bb3365c8-kube-api-access-bxp8c\") pod \"redhat-operators-46vfl\" (UID: \"bcd451b4-9860-4079-9d8a-eab5bb3365c8\") " pod="openshift-marketplace/redhat-operators-46vfl" Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.933471 4881 patch_prober.go:28] interesting pod/router-default-5444994796-qbkd8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 08:18:38 crc kubenswrapper[4881]: [-]has-synced failed: reason withheld Dec 11 08:18:38 crc kubenswrapper[4881]: [+]process-running ok Dec 11 08:18:38 crc 
kubenswrapper[4881]: healthz check failed Dec 11 08:18:38 crc kubenswrapper[4881]: I1211 08:18:38.933531 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbkd8" podUID="301dbf85-d5d3-48f8-8d66-5d05a2d2d22b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.010266 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxp8c\" (UniqueName: \"kubernetes.io/projected/bcd451b4-9860-4079-9d8a-eab5bb3365c8-kube-api-access-bxp8c\") pod \"redhat-operators-46vfl\" (UID: \"bcd451b4-9860-4079-9d8a-eab5bb3365c8\") " pod="openshift-marketplace/redhat-operators-46vfl" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.011086 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcd451b4-9860-4079-9d8a-eab5bb3365c8-catalog-content\") pod \"redhat-operators-46vfl\" (UID: \"bcd451b4-9860-4079-9d8a-eab5bb3365c8\") " pod="openshift-marketplace/redhat-operators-46vfl" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.011127 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcd451b4-9860-4079-9d8a-eab5bb3365c8-utilities\") pod \"redhat-operators-46vfl\" (UID: \"bcd451b4-9860-4079-9d8a-eab5bb3365c8\") " pod="openshift-marketplace/redhat-operators-46vfl" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.012186 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcd451b4-9860-4079-9d8a-eab5bb3365c8-utilities\") pod \"redhat-operators-46vfl\" (UID: \"bcd451b4-9860-4079-9d8a-eab5bb3365c8\") " pod="openshift-marketplace/redhat-operators-46vfl" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.012294 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcd451b4-9860-4079-9d8a-eab5bb3365c8-catalog-content\") pod \"redhat-operators-46vfl\" (UID: \"bcd451b4-9860-4079-9d8a-eab5bb3365c8\") " pod="openshift-marketplace/redhat-operators-46vfl" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.032379 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.043860 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" event={"ID":"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1","Type":"ContainerStarted","Data":"78424e21b354c54529a5a4bc83c45a2b99b3d25d3e44cf7f4d5aca40f1d9ef67"} Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.043896 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" event={"ID":"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1","Type":"ContainerStarted","Data":"728ddb536a6009db0b6069c9feb86db5e7215b44d6d3d18f091c85eda1891956"} Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.044560 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.048582 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxp8c\" 
(UniqueName: \"kubernetes.io/projected/bcd451b4-9860-4079-9d8a-eab5bb3365c8-kube-api-access-bxp8c\") pod \"redhat-operators-46vfl\" (UID: \"bcd451b4-9860-4079-9d8a-eab5bb3365c8\") " pod="openshift-marketplace/redhat-operators-46vfl" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.057225 4881 generic.go:334] "Generic (PLEG): container finished" podID="054e2f3b-99d6-4586-8f79-bb4dadd92742" containerID="d35046069eb6edb40a3c626534d5af76896bd1cb9e1f5736a77427243906f9bb" exitCode=0 Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.057409 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gt5pp" event={"ID":"054e2f3b-99d6-4586-8f79-bb4dadd92742","Type":"ContainerDied","Data":"d35046069eb6edb40a3c626534d5af76896bd1cb9e1f5736a77427243906f9bb"} Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.057445 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gt5pp" event={"ID":"054e2f3b-99d6-4586-8f79-bb4dadd92742","Type":"ContainerStarted","Data":"8b21081b2e2ebac1f979d6181e46060858c62362c866252d066801be4f23fa16"} Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.062609 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" event={"ID":"77d994ef-53f1-4ef8-a668-38226c6c460b","Type":"ContainerDied","Data":"35cc20635c26a0876dca7c895f441e5d6db02b033c3957b9fa19ceddbdf072ab"} Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.062650 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="35cc20635c26a0876dca7c895f441e5d6db02b033c3957b9fa19ceddbdf072ab" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.062749 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.070187 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" podStartSLOduration=139.070168 podStartE2EDuration="2m19.070168s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:39.06852472 +0000 UTC m=+167.445893427" watchObservedRunningTime="2025-12-11 08:18:39.070168 +0000 UTC m=+167.447536697" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.077358 4881 generic.go:334] "Generic (PLEG): container finished" podID="81795d8f-af22-4a16-92de-455b31623c53" containerID="6aa6814d9038dbe3337f5b3c8d612a80a02d4e62fe66535bed54ab966f1544df" exitCode=0 Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.077452 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7zkc" event={"ID":"81795d8f-af22-4a16-92de-455b31623c53","Type":"ContainerDied","Data":"6aa6814d9038dbe3337f5b3c8d612a80a02d4e62fe66535bed54ab966f1544df"} Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.096046 4881 generic.go:334] "Generic (PLEG): container finished" podID="1112a12e-84b7-447f-8c3c-a2bca7fc2096" containerID="a957612b7d9746c7605f9dc5db8e5073b363f0a4154f831ea8ad796aaef4321f" exitCode=0 Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.097836 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j8gln" event={"ID":"1112a12e-84b7-447f-8c3c-a2bca7fc2096","Type":"ContainerDied","Data":"a957612b7d9746c7605f9dc5db8e5073b363f0a4154f831ea8ad796aaef4321f"} Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.097871 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j8gln" event={"ID":"1112a12e-84b7-447f-8c3c-a2bca7fc2096","Type":"ContainerStarted","Data":"6d194f80a882b19724bccc2483a0a98e7027463c1cb7ef79f9a8a93e10775179"} Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.105941 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-jwrst" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.162162 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-46vfl" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.211006 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-58jkm"] Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.213901 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-58jkm" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.234808 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-58jkm"] Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.326273 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1f50723-ab88-4c34-b3ad-099eeffd62c4-catalog-content\") pod \"redhat-operators-58jkm\" (UID: \"d1f50723-ab88-4c34-b3ad-099eeffd62c4\") " pod="openshift-marketplace/redhat-operators-58jkm" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.326325 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1f50723-ab88-4c34-b3ad-099eeffd62c4-utilities\") pod \"redhat-operators-58jkm\" (UID: \"d1f50723-ab88-4c34-b3ad-099eeffd62c4\") " pod="openshift-marketplace/redhat-operators-58jkm" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.326420 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9j76\" (UniqueName: \"kubernetes.io/projected/d1f50723-ab88-4c34-b3ad-099eeffd62c4-kube-api-access-w9j76\") pod \"redhat-operators-58jkm\" (UID: \"d1f50723-ab88-4c34-b3ad-099eeffd62c4\") " pod="openshift-marketplace/redhat-operators-58jkm" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.433310 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1f50723-ab88-4c34-b3ad-099eeffd62c4-catalog-content\") pod \"redhat-operators-58jkm\" (UID: \"d1f50723-ab88-4c34-b3ad-099eeffd62c4\") " pod="openshift-marketplace/redhat-operators-58jkm" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.433651 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1f50723-ab88-4c34-b3ad-099eeffd62c4-utilities\") pod \"redhat-operators-58jkm\" (UID: \"d1f50723-ab88-4c34-b3ad-099eeffd62c4\") " pod="openshift-marketplace/redhat-operators-58jkm" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.433697 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9j76\" (UniqueName: \"kubernetes.io/projected/d1f50723-ab88-4c34-b3ad-099eeffd62c4-kube-api-access-w9j76\") pod \"redhat-operators-58jkm\" (UID: \"d1f50723-ab88-4c34-b3ad-099eeffd62c4\") " pod="openshift-marketplace/redhat-operators-58jkm" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.435512 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1f50723-ab88-4c34-b3ad-099eeffd62c4-utilities\") pod \"redhat-operators-58jkm\" (UID: \"d1f50723-ab88-4c34-b3ad-099eeffd62c4\") " pod="openshift-marketplace/redhat-operators-58jkm" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.435764 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1f50723-ab88-4c34-b3ad-099eeffd62c4-catalog-content\") pod \"redhat-operators-58jkm\" (UID: \"d1f50723-ab88-4c34-b3ad-099eeffd62c4\") " pod="openshift-marketplace/redhat-operators-58jkm" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.454367 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-w9j76\" (UniqueName: \"kubernetes.io/projected/d1f50723-ab88-4c34-b3ad-099eeffd62c4-kube-api-access-w9j76\") pod \"redhat-operators-58jkm\" (UID: \"d1f50723-ab88-4c34-b3ad-099eeffd62c4\") " pod="openshift-marketplace/redhat-operators-58jkm" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.518624 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-46vfl"] Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.550507 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.550810 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.561122 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.583918 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-58jkm" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.930615 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.934344 4881 patch_prober.go:28] interesting pod/router-default-5444994796-qbkd8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 08:18:39 crc kubenswrapper[4881]: [-]has-synced failed: reason withheld Dec 11 08:18:39 crc kubenswrapper[4881]: [+]process-running ok Dec 11 08:18:39 crc kubenswrapper[4881]: healthz check failed Dec 11 08:18:39 crc kubenswrapper[4881]: I1211 08:18:39.934384 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbkd8" podUID="301dbf85-d5d3-48f8-8d66-5d05a2d2d22b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.086563 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-58jkm"] Dec 11 08:18:40 crc kubenswrapper[4881]: W1211 08:18:40.103624 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1f50723_ab88_4c34_b3ad_099eeffd62c4.slice/crio-6dee1a8184e47add6a265582f6bdfd6a8a5744e23435b7cb2995605a03990e23 WatchSource:0}: Error finding container 6dee1a8184e47add6a265582f6bdfd6a8a5744e23435b7cb2995605a03990e23: Status 404 returned error can't find the container with id 6dee1a8184e47add6a265582f6bdfd6a8a5744e23435b7cb2995605a03990e23 Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.106904 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-46vfl" event={"ID":"bcd451b4-9860-4079-9d8a-eab5bb3365c8","Type":"ContainerStarted","Data":"365b5d344ed2fead5949ece16d9a28b945ee73667964fd51703bf7b4ec3918ea"} Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.112949 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cxnkh" Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.629737 4881 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.642648 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.644440 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.647376 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.657136 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.660645 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.788586 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4fe94c00-9a92-4010-a2d3-6203e7fd43c8-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"4fe94c00-9a92-4010-a2d3-6203e7fd43c8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.788637 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4fe94c00-9a92-4010-a2d3-6203e7fd43c8-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"4fe94c00-9a92-4010-a2d3-6203e7fd43c8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.890548 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4fe94c00-9a92-4010-a2d3-6203e7fd43c8-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"4fe94c00-9a92-4010-a2d3-6203e7fd43c8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.890982 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4fe94c00-9a92-4010-a2d3-6203e7fd43c8-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"4fe94c00-9a92-4010-a2d3-6203e7fd43c8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.891146 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4fe94c00-9a92-4010-a2d3-6203e7fd43c8-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"4fe94c00-9a92-4010-a2d3-6203e7fd43c8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.912828 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4fe94c00-9a92-4010-a2d3-6203e7fd43c8-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"4fe94c00-9a92-4010-a2d3-6203e7fd43c8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.932938 4881 patch_prober.go:28] interesting 
pod/router-default-5444994796-qbkd8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 08:18:40 crc kubenswrapper[4881]: [-]has-synced failed: reason withheld Dec 11 08:18:40 crc kubenswrapper[4881]: [+]process-running ok Dec 11 08:18:40 crc kubenswrapper[4881]: healthz check failed Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.932992 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbkd8" podUID="301dbf85-d5d3-48f8-8d66-5d05a2d2d22b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 08:18:40 crc kubenswrapper[4881]: I1211 08:18:40.976373 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 08:18:41 crc kubenswrapper[4881]: I1211 08:18:41.125462 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-58jkm" event={"ID":"d1f50723-ab88-4c34-b3ad-099eeffd62c4","Type":"ContainerStarted","Data":"6dee1a8184e47add6a265582f6bdfd6a8a5744e23435b7cb2995605a03990e23"} Dec 11 08:18:41 crc kubenswrapper[4881]: I1211 08:18:41.374974 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 11 08:18:41 crc kubenswrapper[4881]: W1211 08:18:41.394148 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod4fe94c00_9a92_4010_a2d3_6203e7fd43c8.slice/crio-a551d185d252afad7bd4eb8cb1a2fe7d00917d1f4857a06c21b464d7a5cf3d9d WatchSource:0}: Error finding container a551d185d252afad7bd4eb8cb1a2fe7d00917d1f4857a06c21b464d7a5cf3d9d: Status 404 returned error can't find the container with id a551d185d252afad7bd4eb8cb1a2fe7d00917d1f4857a06c21b464d7a5cf3d9d Dec 11 08:18:41 crc kubenswrapper[4881]: I1211 08:18:41.935141 4881 patch_prober.go:28] interesting pod/router-default-5444994796-qbkd8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 08:18:41 crc kubenswrapper[4881]: [-]has-synced failed: reason withheld Dec 11 08:18:41 crc kubenswrapper[4881]: [+]process-running ok Dec 11 08:18:41 crc kubenswrapper[4881]: healthz check failed Dec 11 08:18:41 crc kubenswrapper[4881]: I1211 08:18:41.935392 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbkd8" podUID="301dbf85-d5d3-48f8-8d66-5d05a2d2d22b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 08:18:42 crc kubenswrapper[4881]: I1211 08:18:42.138579 4881 generic.go:334] "Generic (PLEG): container finished" podID="bcd451b4-9860-4079-9d8a-eab5bb3365c8" containerID="559460f96451f5ce04aea6564d4369de1e45a11f8b2c82801e11359dd0114167" exitCode=0 Dec 11 08:18:42 crc kubenswrapper[4881]: I1211 08:18:42.138628 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-46vfl" event={"ID":"bcd451b4-9860-4079-9d8a-eab5bb3365c8","Type":"ContainerDied","Data":"559460f96451f5ce04aea6564d4369de1e45a11f8b2c82801e11359dd0114167"} Dec 11 08:18:42 crc kubenswrapper[4881]: I1211 08:18:42.140815 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" 
event={"ID":"4fe94c00-9a92-4010-a2d3-6203e7fd43c8","Type":"ContainerStarted","Data":"a551d185d252afad7bd4eb8cb1a2fe7d00917d1f4857a06c21b464d7a5cf3d9d"} Dec 11 08:18:42 crc kubenswrapper[4881]: I1211 08:18:42.934605 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:42 crc kubenswrapper[4881]: I1211 08:18:42.938614 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-qbkd8" Dec 11 08:18:43 crc kubenswrapper[4881]: I1211 08:18:43.154892 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"4fe94c00-9a92-4010-a2d3-6203e7fd43c8","Type":"ContainerStarted","Data":"d2c0589f5fce6b76192731a7a7f02c59845bde7a5f491d3735b100fc56da357c"} Dec 11 08:18:43 crc kubenswrapper[4881]: I1211 08:18:43.156517 4881 generic.go:334] "Generic (PLEG): container finished" podID="d1f50723-ab88-4c34-b3ad-099eeffd62c4" containerID="1130696a293d25015fc70a0fff74d70b650e5e4941a327c52dcd8f118c0abfc7" exitCode=0 Dec 11 08:18:43 crc kubenswrapper[4881]: I1211 08:18:43.157629 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-58jkm" event={"ID":"d1f50723-ab88-4c34-b3ad-099eeffd62c4","Type":"ContainerDied","Data":"1130696a293d25015fc70a0fff74d70b650e5e4941a327c52dcd8f118c0abfc7"} Dec 11 08:18:43 crc kubenswrapper[4881]: I1211 08:18:43.169057 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=3.169042091 podStartE2EDuration="3.169042091s" podCreationTimestamp="2025-12-11 08:18:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:43.168532099 +0000 UTC m=+171.545900796" watchObservedRunningTime="2025-12-11 08:18:43.169042091 +0000 UTC m=+171.546410788" Dec 11 08:18:43 crc kubenswrapper[4881]: I1211 08:18:43.228195 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs\") pod \"network-metrics-daemon-bzslm\" (UID: \"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\") " pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:18:43 crc kubenswrapper[4881]: I1211 08:18:43.234366 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb-metrics-certs\") pod \"network-metrics-daemon-bzslm\" (UID: \"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb\") " pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:18:43 crc kubenswrapper[4881]: I1211 08:18:43.398152 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-bzslm" Dec 11 08:18:43 crc kubenswrapper[4881]: I1211 08:18:43.670227 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-bzslm"] Dec 11 08:18:43 crc kubenswrapper[4881]: W1211 08:18:43.693988 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3a9b30f4_30b0_4b16_9a4a_135d2e7a9beb.slice/crio-c4ab673c07b4957b706288a4100ebfd004b354a85f4141b8393eea8946b8a235 WatchSource:0}: Error finding container c4ab673c07b4957b706288a4100ebfd004b354a85f4141b8393eea8946b8a235: Status 404 returned error can't find the container with id c4ab673c07b4957b706288a4100ebfd004b354a85f4141b8393eea8946b8a235 Dec 11 08:18:44 crc kubenswrapper[4881]: I1211 08:18:44.174446 4881 generic.go:334] "Generic (PLEG): container finished" podID="4fe94c00-9a92-4010-a2d3-6203e7fd43c8" containerID="d2c0589f5fce6b76192731a7a7f02c59845bde7a5f491d3735b100fc56da357c" exitCode=0 Dec 11 08:18:44 crc kubenswrapper[4881]: I1211 08:18:44.174503 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"4fe94c00-9a92-4010-a2d3-6203e7fd43c8","Type":"ContainerDied","Data":"d2c0589f5fce6b76192731a7a7f02c59845bde7a5f491d3735b100fc56da357c"} Dec 11 08:18:44 crc kubenswrapper[4881]: I1211 08:18:44.176924 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-bzslm" event={"ID":"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb","Type":"ContainerStarted","Data":"c4ab673c07b4957b706288a4100ebfd004b354a85f4141b8393eea8946b8a235"} Dec 11 08:18:44 crc kubenswrapper[4881]: I1211 08:18:44.256946 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 11 08:18:44 crc kubenswrapper[4881]: I1211 08:18:44.257625 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 08:18:44 crc kubenswrapper[4881]: I1211 08:18:44.262384 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Dec 11 08:18:44 crc kubenswrapper[4881]: I1211 08:18:44.262386 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Dec 11 08:18:44 crc kubenswrapper[4881]: I1211 08:18:44.277039 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 11 08:18:44 crc kubenswrapper[4881]: I1211 08:18:44.376300 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4c99e425-8e55-42be-aff7-dc4e5199f600-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"4c99e425-8e55-42be-aff7-dc4e5199f600\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 08:18:44 crc kubenswrapper[4881]: I1211 08:18:44.376687 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4c99e425-8e55-42be-aff7-dc4e5199f600-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"4c99e425-8e55-42be-aff7-dc4e5199f600\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 08:18:44 crc kubenswrapper[4881]: I1211 08:18:44.478480 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4c99e425-8e55-42be-aff7-dc4e5199f600-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"4c99e425-8e55-42be-aff7-dc4e5199f600\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 08:18:44 crc kubenswrapper[4881]: I1211 08:18:44.478581 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4c99e425-8e55-42be-aff7-dc4e5199f600-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"4c99e425-8e55-42be-aff7-dc4e5199f600\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 08:18:44 crc kubenswrapper[4881]: I1211 08:18:44.478645 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4c99e425-8e55-42be-aff7-dc4e5199f600-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"4c99e425-8e55-42be-aff7-dc4e5199f600\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 08:18:44 crc kubenswrapper[4881]: I1211 08:18:44.501312 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4c99e425-8e55-42be-aff7-dc4e5199f600-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"4c99e425-8e55-42be-aff7-dc4e5199f600\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 08:18:44 crc kubenswrapper[4881]: I1211 08:18:44.579540 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 08:18:44 crc kubenswrapper[4881]: I1211 08:18:44.902397 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 11 08:18:45 crc kubenswrapper[4881]: I1211 08:18:45.204898 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4c99e425-8e55-42be-aff7-dc4e5199f600","Type":"ContainerStarted","Data":"b0963ac602ceefe246af8ada9a6255c758d83770248fcc0f8c7599450616a2a9"} Dec 11 08:18:45 crc kubenswrapper[4881]: I1211 08:18:45.213251 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-bzslm" event={"ID":"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb","Type":"ContainerStarted","Data":"3e48c9d0ad7413eb126530628f94e6e0111c5d5abf772ce3f700a0d554dbf7a1"} Dec 11 08:18:45 crc kubenswrapper[4881]: I1211 08:18:45.404897 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-f5gdp" Dec 11 08:18:45 crc kubenswrapper[4881]: I1211 08:18:45.435418 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 08:18:45 crc kubenswrapper[4881]: I1211 08:18:45.517556 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4fe94c00-9a92-4010-a2d3-6203e7fd43c8-kubelet-dir\") pod \"4fe94c00-9a92-4010-a2d3-6203e7fd43c8\" (UID: \"4fe94c00-9a92-4010-a2d3-6203e7fd43c8\") " Dec 11 08:18:45 crc kubenswrapper[4881]: I1211 08:18:45.517662 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4fe94c00-9a92-4010-a2d3-6203e7fd43c8-kube-api-access\") pod \"4fe94c00-9a92-4010-a2d3-6203e7fd43c8\" (UID: \"4fe94c00-9a92-4010-a2d3-6203e7fd43c8\") " Dec 11 08:18:45 crc kubenswrapper[4881]: I1211 08:18:45.518806 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fe94c00-9a92-4010-a2d3-6203e7fd43c8-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "4fe94c00-9a92-4010-a2d3-6203e7fd43c8" (UID: "4fe94c00-9a92-4010-a2d3-6203e7fd43c8"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:18:45 crc kubenswrapper[4881]: I1211 08:18:45.530519 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fe94c00-9a92-4010-a2d3-6203e7fd43c8-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "4fe94c00-9a92-4010-a2d3-6203e7fd43c8" (UID: "4fe94c00-9a92-4010-a2d3-6203e7fd43c8"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:18:45 crc kubenswrapper[4881]: I1211 08:18:45.619409 4881 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4fe94c00-9a92-4010-a2d3-6203e7fd43c8-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 11 08:18:45 crc kubenswrapper[4881]: I1211 08:18:45.619443 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4fe94c00-9a92-4010-a2d3-6203e7fd43c8-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 11 08:18:46 crc kubenswrapper[4881]: I1211 08:18:46.234160 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4c99e425-8e55-42be-aff7-dc4e5199f600","Type":"ContainerStarted","Data":"c7d2c291f756741d30df592eb2fffcfd59c9f96f055eddc60f9366c4dbf28e02"} Dec 11 08:18:46 crc kubenswrapper[4881]: I1211 08:18:46.238620 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"4fe94c00-9a92-4010-a2d3-6203e7fd43c8","Type":"ContainerDied","Data":"a551d185d252afad7bd4eb8cb1a2fe7d00917d1f4857a06c21b464d7a5cf3d9d"} Dec 11 08:18:46 crc kubenswrapper[4881]: I1211 08:18:46.238660 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a551d185d252afad7bd4eb8cb1a2fe7d00917d1f4857a06c21b464d7a5cf3d9d" Dec 11 08:18:46 crc kubenswrapper[4881]: I1211 08:18:46.238749 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 08:18:47 crc kubenswrapper[4881]: I1211 08:18:47.258220 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-bzslm" event={"ID":"3a9b30f4-30b0-4b16-9a4a-135d2e7a9beb","Type":"ContainerStarted","Data":"601524fe3269dc8d079cecb1f1ff8142874a15688083588dd25d4ea60a2bd205"} Dec 11 08:18:47 crc kubenswrapper[4881]: I1211 08:18:47.260353 4881 generic.go:334] "Generic (PLEG): container finished" podID="4c99e425-8e55-42be-aff7-dc4e5199f600" containerID="c7d2c291f756741d30df592eb2fffcfd59c9f96f055eddc60f9366c4dbf28e02" exitCode=0 Dec 11 08:18:47 crc kubenswrapper[4881]: I1211 08:18:47.260416 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4c99e425-8e55-42be-aff7-dc4e5199f600","Type":"ContainerDied","Data":"c7d2c291f756741d30df592eb2fffcfd59c9f96f055eddc60f9366c4dbf28e02"} Dec 11 08:18:48 crc kubenswrapper[4881]: I1211 08:18:48.292627 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-bzslm" podStartSLOduration=148.292605342 podStartE2EDuration="2m28.292605342s" podCreationTimestamp="2025-12-11 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:18:48.292496868 +0000 UTC m=+176.669865565" watchObservedRunningTime="2025-12-11 08:18:48.292605342 +0000 UTC m=+176.669974039" Dec 11 08:18:48 crc kubenswrapper[4881]: I1211 08:18:48.478431 4881 patch_prober.go:28] interesting pod/console-f9d7485db-d6tb5 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.12:8443/health\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Dec 11 08:18:48 crc kubenswrapper[4881]: I1211 08:18:48.478490 4881 prober.go:107] 
"Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-d6tb5" podUID="09b82983-c4d5-4c1f-8f41-9dcc20fbfd03" containerName="console" probeResult="failure" output="Get \"https://10.217.0.12:8443/health\": dial tcp 10.217.0.12:8443: connect: connection refused" Dec 11 08:18:48 crc kubenswrapper[4881]: I1211 08:18:48.854379 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-tzs74" Dec 11 08:18:58 crc kubenswrapper[4881]: I1211 08:18:58.485528 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:58 crc kubenswrapper[4881]: I1211 08:18:58.491957 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:18:58 crc kubenswrapper[4881]: I1211 08:18:58.548116 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:18:59 crc kubenswrapper[4881]: I1211 08:18:59.397409 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:18:59 crc kubenswrapper[4881]: I1211 08:18:59.397508 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:19:05 crc kubenswrapper[4881]: I1211 08:19:05.066105 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 08:19:10 crc kubenswrapper[4881]: I1211 08:19:10.304659 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tgcxv" Dec 11 08:19:14 crc kubenswrapper[4881]: I1211 08:19:14.857966 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 11 08:19:14 crc kubenswrapper[4881]: E1211 08:19:14.858694 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fe94c00-9a92-4010-a2d3-6203e7fd43c8" containerName="pruner" Dec 11 08:19:14 crc kubenswrapper[4881]: I1211 08:19:14.858720 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fe94c00-9a92-4010-a2d3-6203e7fd43c8" containerName="pruner" Dec 11 08:19:14 crc kubenswrapper[4881]: I1211 08:19:14.858868 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fe94c00-9a92-4010-a2d3-6203e7fd43c8" containerName="pruner" Dec 11 08:19:14 crc kubenswrapper[4881]: I1211 08:19:14.859435 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 11 08:19:14 crc kubenswrapper[4881]: I1211 08:19:14.870139 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 11 08:19:15 crc kubenswrapper[4881]: I1211 08:19:15.051408 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e4823407-b937-4bed-a881-cba0aec7d8ae-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e4823407-b937-4bed-a881-cba0aec7d8ae\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 11 08:19:15 crc kubenswrapper[4881]: I1211 08:19:15.052141 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e4823407-b937-4bed-a881-cba0aec7d8ae-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e4823407-b937-4bed-a881-cba0aec7d8ae\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 11 08:19:15 crc kubenswrapper[4881]: I1211 08:19:15.152974 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e4823407-b937-4bed-a881-cba0aec7d8ae-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e4823407-b937-4bed-a881-cba0aec7d8ae\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 11 08:19:15 crc kubenswrapper[4881]: I1211 08:19:15.153048 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e4823407-b937-4bed-a881-cba0aec7d8ae-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e4823407-b937-4bed-a881-cba0aec7d8ae\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 11 08:19:15 crc kubenswrapper[4881]: I1211 08:19:15.153193 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e4823407-b937-4bed-a881-cba0aec7d8ae-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e4823407-b937-4bed-a881-cba0aec7d8ae\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 11 08:19:15 crc kubenswrapper[4881]: I1211 08:19:15.526320 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e4823407-b937-4bed-a881-cba0aec7d8ae-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e4823407-b937-4bed-a881-cba0aec7d8ae\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 11 08:19:15 crc kubenswrapper[4881]: I1211 08:19:15.806659 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 11 08:19:17 crc kubenswrapper[4881]: I1211 08:19:17.058806 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 08:19:17 crc kubenswrapper[4881]: I1211 08:19:17.190908 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4c99e425-8e55-42be-aff7-dc4e5199f600-kubelet-dir\") pod \"4c99e425-8e55-42be-aff7-dc4e5199f600\" (UID: \"4c99e425-8e55-42be-aff7-dc4e5199f600\") " Dec 11 08:19:17 crc kubenswrapper[4881]: I1211 08:19:17.190988 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4c99e425-8e55-42be-aff7-dc4e5199f600-kube-api-access\") pod \"4c99e425-8e55-42be-aff7-dc4e5199f600\" (UID: \"4c99e425-8e55-42be-aff7-dc4e5199f600\") " Dec 11 08:19:17 crc kubenswrapper[4881]: I1211 08:19:17.191074 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4c99e425-8e55-42be-aff7-dc4e5199f600-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "4c99e425-8e55-42be-aff7-dc4e5199f600" (UID: "4c99e425-8e55-42be-aff7-dc4e5199f600"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:19:17 crc kubenswrapper[4881]: I1211 08:19:17.191461 4881 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4c99e425-8e55-42be-aff7-dc4e5199f600-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 11 08:19:17 crc kubenswrapper[4881]: I1211 08:19:17.207194 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c99e425-8e55-42be-aff7-dc4e5199f600-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "4c99e425-8e55-42be-aff7-dc4e5199f600" (UID: "4c99e425-8e55-42be-aff7-dc4e5199f600"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:19:17 crc kubenswrapper[4881]: I1211 08:19:17.292404 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4c99e425-8e55-42be-aff7-dc4e5199f600-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 11 08:19:17 crc kubenswrapper[4881]: I1211 08:19:17.486207 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4c99e425-8e55-42be-aff7-dc4e5199f600","Type":"ContainerDied","Data":"b0963ac602ceefe246af8ada9a6255c758d83770248fcc0f8c7599450616a2a9"} Dec 11 08:19:17 crc kubenswrapper[4881]: I1211 08:19:17.486640 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0963ac602ceefe246af8ada9a6255c758d83770248fcc0f8c7599450616a2a9" Dec 11 08:19:17 crc kubenswrapper[4881]: I1211 08:19:17.486786 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 08:19:19 crc kubenswrapper[4881]: I1211 08:19:19.448269 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 11 08:19:19 crc kubenswrapper[4881]: E1211 08:19:19.448789 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c99e425-8e55-42be-aff7-dc4e5199f600" containerName="pruner" Dec 11 08:19:19 crc kubenswrapper[4881]: I1211 08:19:19.448802 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c99e425-8e55-42be-aff7-dc4e5199f600" containerName="pruner" Dec 11 08:19:19 crc kubenswrapper[4881]: I1211 08:19:19.448906 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c99e425-8e55-42be-aff7-dc4e5199f600" containerName="pruner" Dec 11 08:19:19 crc kubenswrapper[4881]: I1211 08:19:19.449349 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 11 08:19:19 crc kubenswrapper[4881]: I1211 08:19:19.461145 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 11 08:19:19 crc kubenswrapper[4881]: I1211 08:19:19.522578 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/eb7312c9-9c92-429a-8c3a-d86d8196564d-var-lock\") pod \"installer-9-crc\" (UID: \"eb7312c9-9c92-429a-8c3a-d86d8196564d\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 11 08:19:19 crc kubenswrapper[4881]: I1211 08:19:19.522634 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb7312c9-9c92-429a-8c3a-d86d8196564d-kube-api-access\") pod \"installer-9-crc\" (UID: \"eb7312c9-9c92-429a-8c3a-d86d8196564d\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 11 08:19:19 crc kubenswrapper[4881]: I1211 08:19:19.522869 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eb7312c9-9c92-429a-8c3a-d86d8196564d-kubelet-dir\") pod \"installer-9-crc\" (UID: \"eb7312c9-9c92-429a-8c3a-d86d8196564d\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 11 08:19:19 crc kubenswrapper[4881]: I1211 08:19:19.623902 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eb7312c9-9c92-429a-8c3a-d86d8196564d-kubelet-dir\") pod \"installer-9-crc\" (UID: \"eb7312c9-9c92-429a-8c3a-d86d8196564d\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 11 08:19:19 crc kubenswrapper[4881]: I1211 08:19:19.624018 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/eb7312c9-9c92-429a-8c3a-d86d8196564d-var-lock\") pod \"installer-9-crc\" (UID: \"eb7312c9-9c92-429a-8c3a-d86d8196564d\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 11 08:19:19 crc kubenswrapper[4881]: I1211 08:19:19.624027 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eb7312c9-9c92-429a-8c3a-d86d8196564d-kubelet-dir\") pod \"installer-9-crc\" (UID: \"eb7312c9-9c92-429a-8c3a-d86d8196564d\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 11 08:19:19 crc kubenswrapper[4881]: I1211 08:19:19.624050 4881 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb7312c9-9c92-429a-8c3a-d86d8196564d-kube-api-access\") pod \"installer-9-crc\" (UID: \"eb7312c9-9c92-429a-8c3a-d86d8196564d\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 11 08:19:19 crc kubenswrapper[4881]: I1211 08:19:19.624103 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/eb7312c9-9c92-429a-8c3a-d86d8196564d-var-lock\") pod \"installer-9-crc\" (UID: \"eb7312c9-9c92-429a-8c3a-d86d8196564d\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 11 08:19:19 crc kubenswrapper[4881]: I1211 08:19:19.648171 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb7312c9-9c92-429a-8c3a-d86d8196564d-kube-api-access\") pod \"installer-9-crc\" (UID: \"eb7312c9-9c92-429a-8c3a-d86d8196564d\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 11 08:19:19 crc kubenswrapper[4881]: I1211 08:19:19.776747 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 11 08:19:22 crc kubenswrapper[4881]: E1211 08:19:22.795689 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e: Get \"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e\": context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Dec 11 08:19:22 crc kubenswrapper[4881]: E1211 08:19:22.796468 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-w9j76,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-58jkm_openshift-marketplace(d1f50723-ab88-4c34-b3ad-099eeffd62c4): ErrImagePull: rpc error: code = Canceled desc = copying system image from 
manifest list: reading blob sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e: Get \"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e\": context canceled" logger="UnhandledError" Dec 11 08:19:22 crc kubenswrapper[4881]: E1211 08:19:22.797692 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e: Get \\\"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e\\\": context canceled\"" pod="openshift-marketplace/redhat-operators-58jkm" podUID="d1f50723-ab88-4c34-b3ad-099eeffd62c4" Dec 11 08:19:23 crc kubenswrapper[4881]: E1211 08:19:23.952686 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-58jkm" podUID="d1f50723-ab88-4c34-b3ad-099eeffd62c4" Dec 11 08:19:24 crc kubenswrapper[4881]: E1211 08:19:24.022286 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Dec 11 08:19:24 crc kubenswrapper[4881]: E1211 08:19:24.022477 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nlkx2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-j8gln_openshift-marketplace(1112a12e-84b7-447f-8c3c-a2bca7fc2096): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 11 08:19:24 crc 
kubenswrapper[4881]: E1211 08:19:24.026446 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-j8gln" podUID="1112a12e-84b7-447f-8c3c-a2bca7fc2096"
Dec 11 08:19:24 crc kubenswrapper[4881]: E1211 08:19:24.049630 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Dec 11 08:19:24 crc kubenswrapper[4881]: E1211 08:19:24.049840 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lr6fl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-gt5pp_openshift-marketplace(054e2f3b-99d6-4586-8f79-bb4dadd92742): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 11 08:19:24 crc kubenswrapper[4881]: E1211 08:19:24.051248 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-gt5pp" podUID="054e2f3b-99d6-4586-8f79-bb4dadd92742"
Dec 11 08:19:28 crc kubenswrapper[4881]: E1211 08:19:28.504102 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-j8gln" podUID="1112a12e-84b7-447f-8c3c-a2bca7fc2096"
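Each pull failure above follows the same escalation: the CRI pull is canceled (log.go:32), kuberuntime_manager dumps the init container spec as an UnhandledError, and pod_workers retries under ErrImagePull until the kubelet's image back-off turns it into ImagePullBackOff, as in the 08:19:28 entries. One way to surface every catalog pod stuck in this state, reusing the clientset cs from the previous sketch (error handling elided):

    // List marketplace pods whose extract-content init container is stuck pulling.
    pods, err := cs.CoreV1().Pods("openshift-marketplace").List(context.TODO(), metav1.ListOptions{})
    if err != nil {
        return err
    }
    for _, p := range pods.Items {
        // extract-content is an init container, so look at InitContainerStatuses.
        for _, st := range p.Status.InitContainerStatuses {
            if w := st.State.Waiting; w != nil &&
                (w.Reason == "ImagePullBackOff" || w.Reason == "ErrImagePull") {
                fmt.Printf("%s: %s: %s\n", p.Name, w.Reason, w.Message)
            }
        }
    }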
\"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-gt5pp" podUID="054e2f3b-99d6-4586-8f79-bb4dadd92742" Dec 11 08:19:29 crc kubenswrapper[4881]: E1211 08:19:29.180155 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Dec 11 08:19:29 crc kubenswrapper[4881]: E1211 08:19:29.184545 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sgf8x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-q7zkc_openshift-marketplace(81795d8f-af22-4a16-92de-455b31623c53): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 11 08:19:29 crc kubenswrapper[4881]: E1211 08:19:29.186573 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-q7zkc" podUID="81795d8f-af22-4a16-92de-455b31623c53" Dec 11 08:19:29 crc kubenswrapper[4881]: I1211 08:19:29.397305 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:19:29 crc kubenswrapper[4881]: I1211 08:19:29.397416 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:19:29 crc kubenswrapper[4881]: I1211 08:19:29.397485 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:19:29 crc kubenswrapper[4881]: I1211 08:19:29.398129 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 08:19:29 crc kubenswrapper[4881]: I1211 08:19:29.398258 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707" gracePeriod=600 Dec 11 08:19:29 crc kubenswrapper[4881]: I1211 08:19:29.545418 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707" exitCode=0 Dec 11 08:19:29 crc kubenswrapper[4881]: I1211 08:19:29.545487 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707"} Dec 11 08:19:30 crc kubenswrapper[4881]: E1211 08:19:30.844098 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-q7zkc" podUID="81795d8f-af22-4a16-92de-455b31623c53" Dec 11 08:19:30 crc kubenswrapper[4881]: E1211 08:19:30.966440 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 11 08:19:30 crc kubenswrapper[4881]: E1211 08:19:30.966620 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
Dec 11 08:19:30 crc kubenswrapper[4881]: E1211 08:19:30.966620 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rldfw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-q4t54_openshift-marketplace(5433ad4c-65f7-4ec1-8a01-f358f14685c4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 11 08:19:30 crc kubenswrapper[4881]: E1211 08:19:30.967818 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-q4t54" podUID="5433ad4c-65f7-4ec1-8a01-f358f14685c4"
Dec 11 08:19:31 crc kubenswrapper[4881]: E1211 08:19:31.075144 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Dec 11 08:19:31 crc kubenswrapper[4881]: E1211 08:19:31.075315 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-m57fs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-gpmrb_openshift-marketplace(5d19974b-baa0-46b1-b1ba-24411f62a0c0): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 11 08:19:31 crc kubenswrapper[4881]: E1211 08:19:31.076824 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-gpmrb" podUID="5d19974b-baa0-46b1-b1ba-24411f62a0c0" Dec 11 08:19:31 crc kubenswrapper[4881]: E1211 08:19:31.307644 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Dec 11 08:19:31 crc kubenswrapper[4881]: E1211 08:19:31.307807 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fgmrn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-rxnhw_openshift-marketplace(2bf5f48d-6c07-40f7-8f84-3762251f1d1d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 11 08:19:31 crc kubenswrapper[4881]: E1211 08:19:31.308918 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-rxnhw" podUID="2bf5f48d-6c07-40f7-8f84-3762251f1d1d" Dec 11 08:19:39 crc kubenswrapper[4881]: E1211 08:19:39.762473 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-rxnhw" podUID="2bf5f48d-6c07-40f7-8f84-3762251f1d1d" Dec 11 08:19:39 crc kubenswrapper[4881]: E1211 08:19:39.762498 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-q4t54" podUID="5433ad4c-65f7-4ec1-8a01-f358f14685c4" Dec 11 08:19:39 crc kubenswrapper[4881]: E1211 08:19:39.762553 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-gpmrb" podUID="5d19974b-baa0-46b1-b1ba-24411f62a0c0" Dec 11 08:19:39 crc kubenswrapper[4881]: E1211 08:19:39.854187 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Dec 11 08:19:39 crc kubenswrapper[4881]: E1211 08:19:39.854757 4881 kuberuntime_manager.go:1274] "Unhandled 
Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bxp8c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-46vfl_openshift-marketplace(bcd451b4-9860-4079-9d8a-eab5bb3365c8): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 11 08:19:39 crc kubenswrapper[4881]: E1211 08:19:39.855980 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-46vfl" podUID="bcd451b4-9860-4079-9d8a-eab5bb3365c8" Dec 11 08:19:40 crc kubenswrapper[4881]: I1211 08:19:40.008070 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 11 08:19:40 crc kubenswrapper[4881]: I1211 08:19:40.040959 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 11 08:19:40 crc kubenswrapper[4881]: I1211 08:19:40.602031 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"e4823407-b937-4bed-a881-cba0aec7d8ae","Type":"ContainerStarted","Data":"75383caa860893f4214ce0e52d347f7ee9505667622c58e6d95e13654d78d9b9"} Dec 11 08:19:40 crc kubenswrapper[4881]: I1211 08:19:40.604612 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"eb7312c9-9c92-429a-8c3a-d86d8196564d","Type":"ContainerStarted","Data":"47a279f89b32d5b2b734c086078cb938e46714f9c62a33a26c3b3de4bbaa2b9b"} Dec 11 08:19:41 crc kubenswrapper[4881]: I1211 08:19:41.610688 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"eb7312c9-9c92-429a-8c3a-d86d8196564d","Type":"ContainerStarted","Data":"1b96acdc1951244f1de31b564e9731ad7eccd3629f5a5bf859f2715d38e919be"} Dec 11 08:19:41 crc kubenswrapper[4881]: 
I1211 08:19:41.612042 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"e4823407-b937-4bed-a881-cba0aec7d8ae","Type":"ContainerStarted","Data":"64b5c99b1412dc5eba8516aad7287aeaf540f0272491e313b8b23c4a1197236a"} Dec 11 08:19:41 crc kubenswrapper[4881]: I1211 08:19:41.621846 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"d7ca78f14154a7019c587a5f29a4086b385a480f4f657464057274ffa0a054ec"} Dec 11 08:19:41 crc kubenswrapper[4881]: I1211 08:19:41.639695 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=27.639653638 podStartE2EDuration="27.639653638s" podCreationTimestamp="2025-12-11 08:19:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:19:41.633803745 +0000 UTC m=+230.011172442" watchObservedRunningTime="2025-12-11 08:19:41.639653638 +0000 UTC m=+230.017022335" Dec 11 08:19:42 crc kubenswrapper[4881]: I1211 08:19:42.640781 4881 generic.go:334] "Generic (PLEG): container finished" podID="e4823407-b937-4bed-a881-cba0aec7d8ae" containerID="64b5c99b1412dc5eba8516aad7287aeaf540f0272491e313b8b23c4a1197236a" exitCode=0 Dec 11 08:19:42 crc kubenswrapper[4881]: I1211 08:19:42.640858 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"e4823407-b937-4bed-a881-cba0aec7d8ae","Type":"ContainerDied","Data":"64b5c99b1412dc5eba8516aad7287aeaf540f0272491e313b8b23c4a1197236a"} Dec 11 08:19:42 crc kubenswrapper[4881]: I1211 08:19:42.673295 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=23.673277562 podStartE2EDuration="23.673277562s" podCreationTimestamp="2025-12-11 08:19:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:19:42.672952434 +0000 UTC m=+231.050321141" watchObservedRunningTime="2025-12-11 08:19:42.673277562 +0000 UTC m=+231.050646259" Dec 11 08:19:43 crc kubenswrapper[4881]: I1211 08:19:43.649391 4881 generic.go:334] "Generic (PLEG): container finished" podID="d1f50723-ab88-4c34-b3ad-099eeffd62c4" containerID="9490ecdcae64d37198f4b34b6c09acc48cc5fdc04dd712c79de664985be1a9bc" exitCode=0 Dec 11 08:19:43 crc kubenswrapper[4881]: I1211 08:19:43.649481 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-58jkm" event={"ID":"d1f50723-ab88-4c34-b3ad-099eeffd62c4","Type":"ContainerDied","Data":"9490ecdcae64d37198f4b34b6c09acc48cc5fdc04dd712c79de664985be1a9bc"} Dec 11 08:19:43 crc kubenswrapper[4881]: I1211 08:19:43.874529 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 11 08:19:43 crc kubenswrapper[4881]: I1211 08:19:43.962473 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e4823407-b937-4bed-a881-cba0aec7d8ae-kubelet-dir\") pod \"e4823407-b937-4bed-a881-cba0aec7d8ae\" (UID: \"e4823407-b937-4bed-a881-cba0aec7d8ae\") " Dec 11 08:19:43 crc kubenswrapper[4881]: I1211 08:19:43.962561 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e4823407-b937-4bed-a881-cba0aec7d8ae-kube-api-access\") pod \"e4823407-b937-4bed-a881-cba0aec7d8ae\" (UID: \"e4823407-b937-4bed-a881-cba0aec7d8ae\") " Dec 11 08:19:43 crc kubenswrapper[4881]: I1211 08:19:43.962613 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e4823407-b937-4bed-a881-cba0aec7d8ae-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "e4823407-b937-4bed-a881-cba0aec7d8ae" (UID: "e4823407-b937-4bed-a881-cba0aec7d8ae"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:19:43 crc kubenswrapper[4881]: I1211 08:19:43.962839 4881 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e4823407-b937-4bed-a881-cba0aec7d8ae-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 11 08:19:43 crc kubenswrapper[4881]: I1211 08:19:43.969377 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4823407-b937-4bed-a881-cba0aec7d8ae-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e4823407-b937-4bed-a881-cba0aec7d8ae" (UID: "e4823407-b937-4bed-a881-cba0aec7d8ae"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:19:44 crc kubenswrapper[4881]: I1211 08:19:44.064821 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e4823407-b937-4bed-a881-cba0aec7d8ae-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 11 08:19:44 crc kubenswrapper[4881]: I1211 08:19:44.656596 4881 generic.go:334] "Generic (PLEG): container finished" podID="054e2f3b-99d6-4586-8f79-bb4dadd92742" containerID="7520f203c7ba410ef2e5667c20cfd7e0fd58b0ff7810d85055ad5e964a38deb0" exitCode=0 Dec 11 08:19:44 crc kubenswrapper[4881]: I1211 08:19:44.656682 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gt5pp" event={"ID":"054e2f3b-99d6-4586-8f79-bb4dadd92742","Type":"ContainerDied","Data":"7520f203c7ba410ef2e5667c20cfd7e0fd58b0ff7810d85055ad5e964a38deb0"} Dec 11 08:19:44 crc kubenswrapper[4881]: I1211 08:19:44.659690 4881 generic.go:334] "Generic (PLEG): container finished" podID="1112a12e-84b7-447f-8c3c-a2bca7fc2096" containerID="897600ec153ed64a5e11c3b0840f94560c724714a837d40e34ee3994756316e6" exitCode=0 Dec 11 08:19:44 crc kubenswrapper[4881]: I1211 08:19:44.659764 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j8gln" event={"ID":"1112a12e-84b7-447f-8c3c-a2bca7fc2096","Type":"ContainerDied","Data":"897600ec153ed64a5e11c3b0840f94560c724714a837d40e34ee3994756316e6"} Dec 11 08:19:44 crc kubenswrapper[4881]: I1211 08:19:44.661128 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"e4823407-b937-4bed-a881-cba0aec7d8ae","Type":"ContainerDied","Data":"75383caa860893f4214ce0e52d347f7ee9505667622c58e6d95e13654d78d9b9"} Dec 11 08:19:44 crc kubenswrapper[4881]: I1211 08:19:44.661152 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75383caa860893f4214ce0e52d347f7ee9505667622c58e6d95e13654d78d9b9" Dec 11 08:19:44 crc kubenswrapper[4881]: I1211 08:19:44.661203 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 11 08:19:44 crc kubenswrapper[4881]: I1211 08:19:44.665087 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-58jkm" event={"ID":"d1f50723-ab88-4c34-b3ad-099eeffd62c4","Type":"ContainerStarted","Data":"5321d6fd3b8a976d89b579ce3fd92b3587e1a4ba0238f3077c4413a9f40bb666"} Dec 11 08:19:44 crc kubenswrapper[4881]: I1211 08:19:44.714085 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-58jkm" podStartSLOduration=4.44856156 podStartE2EDuration="1m5.714062824s" podCreationTimestamp="2025-12-11 08:18:39 +0000 UTC" firstStartedPulling="2025-12-11 08:18:43.160381917 +0000 UTC m=+171.537750614" lastFinishedPulling="2025-12-11 08:19:44.425883161 +0000 UTC m=+232.803251878" observedRunningTime="2025-12-11 08:19:44.711717207 +0000 UTC m=+233.089085924" watchObservedRunningTime="2025-12-11 08:19:44.714062824 +0000 UTC m=+233.091431531" Dec 11 08:19:45 crc kubenswrapper[4881]: I1211 08:19:45.671771 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j8gln" event={"ID":"1112a12e-84b7-447f-8c3c-a2bca7fc2096","Type":"ContainerStarted","Data":"6bfe59659d969cfe52cd90134c4e6d7ae4e90d60d62672738636e658a6e32f00"} Dec 11 08:19:45 crc kubenswrapper[4881]: I1211 08:19:45.689664 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-j8gln" podStartSLOduration=2.4996212890000002 podStartE2EDuration="1m8.689643821s" podCreationTimestamp="2025-12-11 08:18:37 +0000 UTC" firstStartedPulling="2025-12-11 08:18:39.099280739 +0000 UTC m=+167.476649436" lastFinishedPulling="2025-12-11 08:19:45.289303271 +0000 UTC m=+233.666671968" observedRunningTime="2025-12-11 08:19:45.687694434 +0000 UTC m=+234.065063141" watchObservedRunningTime="2025-12-11 08:19:45.689643821 +0000 UTC m=+234.067012518" Dec 11 08:19:46 crc kubenswrapper[4881]: I1211 08:19:46.678470 4881 generic.go:334] "Generic (PLEG): container finished" podID="81795d8f-af22-4a16-92de-455b31623c53" containerID="5eedd1abe3c37534822d496c3350db07cbfe97b55dff295a6cd3ee6ac8282e38" exitCode=0 Dec 11 08:19:46 crc kubenswrapper[4881]: I1211 08:19:46.678571 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7zkc" event={"ID":"81795d8f-af22-4a16-92de-455b31623c53","Type":"ContainerDied","Data":"5eedd1abe3c37534822d496c3350db07cbfe97b55dff295a6cd3ee6ac8282e38"} Dec 11 08:19:46 crc kubenswrapper[4881]: I1211 08:19:46.681876 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gt5pp" event={"ID":"054e2f3b-99d6-4586-8f79-bb4dadd92742","Type":"ContainerStarted","Data":"336e626f3736a7b89a769f26b44fd9bc0b3955de6aea3fdaae2ed83a6827543c"} Dec 11 08:19:46 crc kubenswrapper[4881]: I1211 08:19:46.723658 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gt5pp" podStartSLOduration=3.034361677 podStartE2EDuration="1m9.723640174s" podCreationTimestamp="2025-12-11 08:18:37 +0000 UTC" firstStartedPulling="2025-12-11 08:18:39.059197139 +0000 UTC m=+167.436565836" lastFinishedPulling="2025-12-11 08:19:45.748475646 +0000 UTC m=+234.125844333" observedRunningTime="2025-12-11 08:19:46.7226503 +0000 UTC m=+235.100018997" watchObservedRunningTime="2025-12-11 08:19:46.723640174 +0000 UTC m=+235.101008871" Dec 11 08:19:47 crc 
kubenswrapper[4881]: I1211 08:19:47.696516 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7zkc" event={"ID":"81795d8f-af22-4a16-92de-455b31623c53","Type":"ContainerStarted","Data":"d86991f7dff9ca353a54d0670f26a4712db17ebcc98fac2546a845b01b0d7fa7"} Dec 11 08:19:47 crc kubenswrapper[4881]: I1211 08:19:47.711621 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-q7zkc" podStartSLOduration=4.350297857 podStartE2EDuration="1m12.711601843s" podCreationTimestamp="2025-12-11 08:18:35 +0000 UTC" firstStartedPulling="2025-12-11 08:18:39.079347956 +0000 UTC m=+167.456716653" lastFinishedPulling="2025-12-11 08:19:47.440651942 +0000 UTC m=+235.818020639" observedRunningTime="2025-12-11 08:19:47.711240975 +0000 UTC m=+236.088609692" watchObservedRunningTime="2025-12-11 08:19:47.711601843 +0000 UTC m=+236.088970540" Dec 11 08:19:48 crc kubenswrapper[4881]: I1211 08:19:48.066153 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-j8gln" Dec 11 08:19:48 crc kubenswrapper[4881]: I1211 08:19:48.066312 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-j8gln" Dec 11 08:19:48 crc kubenswrapper[4881]: I1211 08:19:48.154697 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gt5pp" Dec 11 08:19:48 crc kubenswrapper[4881]: I1211 08:19:48.154763 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-gt5pp" Dec 11 08:19:48 crc kubenswrapper[4881]: I1211 08:19:48.165776 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-j8gln" Dec 11 08:19:48 crc kubenswrapper[4881]: I1211 08:19:48.193993 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gt5pp" Dec 11 08:19:49 crc kubenswrapper[4881]: I1211 08:19:49.584724 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-58jkm" Dec 11 08:19:49 crc kubenswrapper[4881]: I1211 08:19:49.585084 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-58jkm" Dec 11 08:19:50 crc kubenswrapper[4881]: I1211 08:19:50.622453 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-58jkm" podUID="d1f50723-ab88-4c34-b3ad-099eeffd62c4" containerName="registry-server" probeResult="failure" output=< Dec 11 08:19:50 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 08:19:50 crc kubenswrapper[4881]: > Dec 11 08:19:56 crc kubenswrapper[4881]: I1211 08:19:56.100726 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-q7zkc" Dec 11 08:19:56 crc kubenswrapper[4881]: I1211 08:19:56.101551 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-q7zkc" Dec 11 08:19:56 crc kubenswrapper[4881]: I1211 08:19:56.144631 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-q7zkc" Dec 11 08:19:57 crc kubenswrapper[4881]: I1211 08:19:57.794234 4881 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-marketplace/community-operators-q7zkc" podUID="81795d8f-af22-4a16-92de-455b31623c53" containerName="registry-server" probeResult="failure" output=< Dec 11 08:19:57 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 08:19:57 crc kubenswrapper[4881]: > Dec 11 08:19:58 crc kubenswrapper[4881]: I1211 08:19:58.109539 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-j8gln" Dec 11 08:19:58 crc kubenswrapper[4881]: I1211 08:19:58.950668 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gt5pp" Dec 11 08:19:58 crc kubenswrapper[4881]: I1211 08:19:58.999136 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gt5pp"] Dec 11 08:19:59 crc kubenswrapper[4881]: I1211 08:19:59.639932 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-58jkm" Dec 11 08:19:59 crc kubenswrapper[4881]: I1211 08:19:59.681931 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-58jkm" Dec 11 08:19:59 crc kubenswrapper[4881]: I1211 08:19:59.762939 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gt5pp" podUID="054e2f3b-99d6-4586-8f79-bb4dadd92742" containerName="registry-server" containerID="cri-o://336e626f3736a7b89a769f26b44fd9bc0b3955de6aea3fdaae2ed83a6827543c" gracePeriod=2 Dec 11 08:20:00 crc kubenswrapper[4881]: I1211 08:20:00.740676 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-58jkm"] Dec 11 08:20:00 crc kubenswrapper[4881]: I1211 08:20:00.768427 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-58jkm" podUID="d1f50723-ab88-4c34-b3ad-099eeffd62c4" containerName="registry-server" containerID="cri-o://5321d6fd3b8a976d89b579ce3fd92b3587e1a4ba0238f3077c4413a9f40bb666" gracePeriod=2 Dec 11 08:20:01 crc kubenswrapper[4881]: I1211 08:20:01.776835 4881 generic.go:334] "Generic (PLEG): container finished" podID="054e2f3b-99d6-4586-8f79-bb4dadd92742" containerID="336e626f3736a7b89a769f26b44fd9bc0b3955de6aea3fdaae2ed83a6827543c" exitCode=0 Dec 11 08:20:01 crc kubenswrapper[4881]: I1211 08:20:01.776902 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gt5pp" event={"ID":"054e2f3b-99d6-4586-8f79-bb4dadd92742","Type":"ContainerDied","Data":"336e626f3736a7b89a769f26b44fd9bc0b3955de6aea3fdaae2ed83a6827543c"} Dec 11 08:20:02 crc kubenswrapper[4881]: I1211 08:20:02.783479 4881 generic.go:334] "Generic (PLEG): container finished" podID="d1f50723-ab88-4c34-b3ad-099eeffd62c4" containerID="5321d6fd3b8a976d89b579ce3fd92b3587e1a4ba0238f3077c4413a9f40bb666" exitCode=0 Dec 11 08:20:02 crc kubenswrapper[4881]: I1211 08:20:02.783766 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-58jkm" event={"ID":"d1f50723-ab88-4c34-b3ad-099eeffd62c4","Type":"ContainerDied","Data":"5321d6fd3b8a976d89b579ce3fd92b3587e1a4ba0238f3077c4413a9f40bb666"} Dec 11 08:20:02 crc kubenswrapper[4881]: I1211 08:20:02.810708 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-79b8l"] Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.737234 4881 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gpmrb"] Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.752396 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q4t54"] Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.756767 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q7zkc"] Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.757318 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-q7zkc" podUID="81795d8f-af22-4a16-92de-455b31623c53" containerName="registry-server" containerID="cri-o://d86991f7dff9ca353a54d0670f26a4712db17ebcc98fac2546a845b01b0d7fa7" gracePeriod=30 Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.760120 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rxnhw"] Dec 11 08:20:04 crc kubenswrapper[4881]: E1211 08:20:04.765545 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d86991f7dff9ca353a54d0670f26a4712db17ebcc98fac2546a845b01b0d7fa7" cmd=["grpc_health_probe","-addr=:50051"] Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.765711 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2n9k5"] Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.765905 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" podUID="8dc5d9d6-b64d-494d-a6e6-917ed40c01ae" containerName="marketplace-operator" containerID="cri-o://ccde261349a9cdfe045229c11857866097d56efdcf65bf7a367ca36cc7d89aee" gracePeriod=30 Dec 11 08:20:04 crc kubenswrapper[4881]: E1211 08:20:04.771288 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d86991f7dff9ca353a54d0670f26a4712db17ebcc98fac2546a845b01b0d7fa7" cmd=["grpc_health_probe","-addr=:50051"] Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.772911 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j8gln"] Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.773199 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-j8gln" podUID="1112a12e-84b7-447f-8c3c-a2bca7fc2096" containerName="registry-server" containerID="cri-o://6bfe59659d969cfe52cd90134c4e6d7ae4e90d60d62672738636e658a6e32f00" gracePeriod=30 Dec 11 08:20:04 crc kubenswrapper[4881]: E1211 08:20:04.774941 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d86991f7dff9ca353a54d0670f26a4712db17ebcc98fac2546a845b01b0d7fa7" cmd=["grpc_health_probe","-addr=:50051"] Dec 11 08:20:04 crc kubenswrapper[4881]: E1211 08:20:04.775002 4881 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-marketplace/community-operators-q7zkc" 
podUID="81795d8f-af22-4a16-92de-455b31623c53" containerName="registry-server" Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.776738 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-tbhw6"] Dec 11 08:20:04 crc kubenswrapper[4881]: E1211 08:20:04.777173 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4823407-b937-4bed-a881-cba0aec7d8ae" containerName="pruner" Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.777209 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4823407-b937-4bed-a881-cba0aec7d8ae" containerName="pruner" Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.777389 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4823407-b937-4bed-a881-cba0aec7d8ae" containerName="pruner" Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.777932 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.779994 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-46vfl"] Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.786371 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-tbhw6"] Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.859163 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-tbhw6\" (UID: \"a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf\") " pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.859225 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-tbhw6\" (UID: \"a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf\") " pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.859297 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vx8xf\" (UniqueName: \"kubernetes.io/projected/a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf-kube-api-access-vx8xf\") pod \"marketplace-operator-79b997595-tbhw6\" (UID: \"a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf\") " pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.960548 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-tbhw6\" (UID: \"a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf\") " pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.960642 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-tbhw6\" (UID: 
\"a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf\") " pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.960766 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vx8xf\" (UniqueName: \"kubernetes.io/projected/a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf-kube-api-access-vx8xf\") pod \"marketplace-operator-79b997595-tbhw6\" (UID: \"a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf\") " pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.962955 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-tbhw6\" (UID: \"a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf\") " pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.973832 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-tbhw6\" (UID: \"a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf\") " pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" Dec 11 08:20:04 crc kubenswrapper[4881]: I1211 08:20:04.980640 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vx8xf\" (UniqueName: \"kubernetes.io/projected/a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf-kube-api-access-vx8xf\") pod \"marketplace-operator-79b997595-tbhw6\" (UID: \"a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf\") " pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" Dec 11 08:20:05 crc kubenswrapper[4881]: I1211 08:20:05.123655 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" Dec 11 08:20:05 crc kubenswrapper[4881]: I1211 08:20:05.809314 4881 generic.go:334] "Generic (PLEG): container finished" podID="81795d8f-af22-4a16-92de-455b31623c53" containerID="d86991f7dff9ca353a54d0670f26a4712db17ebcc98fac2546a845b01b0d7fa7" exitCode=0 Dec 11 08:20:05 crc kubenswrapper[4881]: I1211 08:20:05.809430 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7zkc" event={"ID":"81795d8f-af22-4a16-92de-455b31623c53","Type":"ContainerDied","Data":"d86991f7dff9ca353a54d0670f26a4712db17ebcc98fac2546a845b01b0d7fa7"} Dec 11 08:20:05 crc kubenswrapper[4881]: I1211 08:20:05.811587 4881 generic.go:334] "Generic (PLEG): container finished" podID="8dc5d9d6-b64d-494d-a6e6-917ed40c01ae" containerID="ccde261349a9cdfe045229c11857866097d56efdcf65bf7a367ca36cc7d89aee" exitCode=0 Dec 11 08:20:05 crc kubenswrapper[4881]: I1211 08:20:05.811688 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" event={"ID":"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae","Type":"ContainerDied","Data":"ccde261349a9cdfe045229c11857866097d56efdcf65bf7a367ca36cc7d89aee"} Dec 11 08:20:05 crc kubenswrapper[4881]: I1211 08:20:05.815488 4881 generic.go:334] "Generic (PLEG): container finished" podID="1112a12e-84b7-447f-8c3c-a2bca7fc2096" containerID="6bfe59659d969cfe52cd90134c4e6d7ae4e90d60d62672738636e658a6e32f00" exitCode=0 Dec 11 08:20:05 crc kubenswrapper[4881]: I1211 08:20:05.815549 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j8gln" event={"ID":"1112a12e-84b7-447f-8c3c-a2bca7fc2096","Type":"ContainerDied","Data":"6bfe59659d969cfe52cd90134c4e6d7ae4e90d60d62672738636e658a6e32f00"} Dec 11 08:20:06 crc kubenswrapper[4881]: E1211 08:20:06.101886 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d86991f7dff9ca353a54d0670f26a4712db17ebcc98fac2546a845b01b0d7fa7 is running failed: container process not found" containerID="d86991f7dff9ca353a54d0670f26a4712db17ebcc98fac2546a845b01b0d7fa7" cmd=["grpc_health_probe","-addr=:50051"] Dec 11 08:20:06 crc kubenswrapper[4881]: E1211 08:20:06.102522 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d86991f7dff9ca353a54d0670f26a4712db17ebcc98fac2546a845b01b0d7fa7 is running failed: container process not found" containerID="d86991f7dff9ca353a54d0670f26a4712db17ebcc98fac2546a845b01b0d7fa7" cmd=["grpc_health_probe","-addr=:50051"] Dec 11 08:20:06 crc kubenswrapper[4881]: E1211 08:20:06.103038 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d86991f7dff9ca353a54d0670f26a4712db17ebcc98fac2546a845b01b0d7fa7 is running failed: container process not found" containerID="d86991f7dff9ca353a54d0670f26a4712db17ebcc98fac2546a845b01b0d7fa7" cmd=["grpc_health_probe","-addr=:50051"] Dec 11 08:20:06 crc kubenswrapper[4881]: E1211 08:20:06.103113 4881 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d86991f7dff9ca353a54d0670f26a4712db17ebcc98fac2546a845b01b0d7fa7 is running failed: container process not found" probeType="Readiness" 
pod="openshift-marketplace/community-operators-q7zkc" podUID="81795d8f-af22-4a16-92de-455b31623c53" containerName="registry-server" Dec 11 08:20:08 crc kubenswrapper[4881]: E1211 08:20:08.067084 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6bfe59659d969cfe52cd90134c4e6d7ae4e90d60d62672738636e658a6e32f00 is running failed: container process not found" containerID="6bfe59659d969cfe52cd90134c4e6d7ae4e90d60d62672738636e658a6e32f00" cmd=["grpc_health_probe","-addr=:50051"] Dec 11 08:20:08 crc kubenswrapper[4881]: E1211 08:20:08.067951 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6bfe59659d969cfe52cd90134c4e6d7ae4e90d60d62672738636e658a6e32f00 is running failed: container process not found" containerID="6bfe59659d969cfe52cd90134c4e6d7ae4e90d60d62672738636e658a6e32f00" cmd=["grpc_health_probe","-addr=:50051"] Dec 11 08:20:08 crc kubenswrapper[4881]: E1211 08:20:08.068403 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6bfe59659d969cfe52cd90134c4e6d7ae4e90d60d62672738636e658a6e32f00 is running failed: container process not found" containerID="6bfe59659d969cfe52cd90134c4e6d7ae4e90d60d62672738636e658a6e32f00" cmd=["grpc_health_probe","-addr=:50051"] Dec 11 08:20:08 crc kubenswrapper[4881]: E1211 08:20:08.068492 4881 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6bfe59659d969cfe52cd90134c4e6d7ae4e90d60d62672738636e658a6e32f00 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-j8gln" podUID="1112a12e-84b7-447f-8c3c-a2bca7fc2096" containerName="registry-server" Dec 11 08:20:08 crc kubenswrapper[4881]: E1211 08:20:08.156156 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 336e626f3736a7b89a769f26b44fd9bc0b3955de6aea3fdaae2ed83a6827543c is running failed: container process not found" containerID="336e626f3736a7b89a769f26b44fd9bc0b3955de6aea3fdaae2ed83a6827543c" cmd=["grpc_health_probe","-addr=:50051"] Dec 11 08:20:08 crc kubenswrapper[4881]: E1211 08:20:08.156584 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 336e626f3736a7b89a769f26b44fd9bc0b3955de6aea3fdaae2ed83a6827543c is running failed: container process not found" containerID="336e626f3736a7b89a769f26b44fd9bc0b3955de6aea3fdaae2ed83a6827543c" cmd=["grpc_health_probe","-addr=:50051"] Dec 11 08:20:08 crc kubenswrapper[4881]: E1211 08:20:08.157086 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 336e626f3736a7b89a769f26b44fd9bc0b3955de6aea3fdaae2ed83a6827543c is running failed: container process not found" containerID="336e626f3736a7b89a769f26b44fd9bc0b3955de6aea3fdaae2ed83a6827543c" cmd=["grpc_health_probe","-addr=:50051"] Dec 11 08:20:08 crc kubenswrapper[4881]: E1211 08:20:08.157127 4881 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 336e626f3736a7b89a769f26b44fd9bc0b3955de6aea3fdaae2ed83a6827543c is 
running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-gt5pp" podUID="054e2f3b-99d6-4586-8f79-bb4dadd92742" containerName="registry-server" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.379565 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gt5pp" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.505549 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lr6fl\" (UniqueName: \"kubernetes.io/projected/054e2f3b-99d6-4586-8f79-bb4dadd92742-kube-api-access-lr6fl\") pod \"054e2f3b-99d6-4586-8f79-bb4dadd92742\" (UID: \"054e2f3b-99d6-4586-8f79-bb4dadd92742\") " Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.505929 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/054e2f3b-99d6-4586-8f79-bb4dadd92742-catalog-content\") pod \"054e2f3b-99d6-4586-8f79-bb4dadd92742\" (UID: \"054e2f3b-99d6-4586-8f79-bb4dadd92742\") " Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.506095 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/054e2f3b-99d6-4586-8f79-bb4dadd92742-utilities\") pod \"054e2f3b-99d6-4586-8f79-bb4dadd92742\" (UID: \"054e2f3b-99d6-4586-8f79-bb4dadd92742\") " Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.512628 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/054e2f3b-99d6-4586-8f79-bb4dadd92742-kube-api-access-lr6fl" (OuterVolumeSpecName: "kube-api-access-lr6fl") pod "054e2f3b-99d6-4586-8f79-bb4dadd92742" (UID: "054e2f3b-99d6-4586-8f79-bb4dadd92742"). InnerVolumeSpecName "kube-api-access-lr6fl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.517570 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/054e2f3b-99d6-4586-8f79-bb4dadd92742-utilities" (OuterVolumeSpecName: "utilities") pod "054e2f3b-99d6-4586-8f79-bb4dadd92742" (UID: "054e2f3b-99d6-4586-8f79-bb4dadd92742"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.544188 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/054e2f3b-99d6-4586-8f79-bb4dadd92742-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "054e2f3b-99d6-4586-8f79-bb4dadd92742" (UID: "054e2f3b-99d6-4586-8f79-bb4dadd92742"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.579255 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-58jkm" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.618100 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/054e2f3b-99d6-4586-8f79-bb4dadd92742-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.618168 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lr6fl\" (UniqueName: \"kubernetes.io/projected/054e2f3b-99d6-4586-8f79-bb4dadd92742-kube-api-access-lr6fl\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.618179 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/054e2f3b-99d6-4586-8f79-bb4dadd92742-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.626707 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j8gln" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.649725 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q7zkc" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.651178 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-tbhw6"] Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.712214 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.718964 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1f50723-ab88-4c34-b3ad-099eeffd62c4-catalog-content\") pod \"d1f50723-ab88-4c34-b3ad-099eeffd62c4\" (UID: \"d1f50723-ab88-4c34-b3ad-099eeffd62c4\") " Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.719050 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9j76\" (UniqueName: \"kubernetes.io/projected/d1f50723-ab88-4c34-b3ad-099eeffd62c4-kube-api-access-w9j76\") pod \"d1f50723-ab88-4c34-b3ad-099eeffd62c4\" (UID: \"d1f50723-ab88-4c34-b3ad-099eeffd62c4\") " Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.719171 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1f50723-ab88-4c34-b3ad-099eeffd62c4-utilities\") pod \"d1f50723-ab88-4c34-b3ad-099eeffd62c4\" (UID: \"d1f50723-ab88-4c34-b3ad-099eeffd62c4\") " Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.725275 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1f50723-ab88-4c34-b3ad-099eeffd62c4-kube-api-access-w9j76" (OuterVolumeSpecName: "kube-api-access-w9j76") pod "d1f50723-ab88-4c34-b3ad-099eeffd62c4" (UID: "d1f50723-ab88-4c34-b3ad-099eeffd62c4"). InnerVolumeSpecName "kube-api-access-w9j76". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.725848 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9j76\" (UniqueName: \"kubernetes.io/projected/d1f50723-ab88-4c34-b3ad-099eeffd62c4-kube-api-access-w9j76\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.733170 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1f50723-ab88-4c34-b3ad-099eeffd62c4-utilities" (OuterVolumeSpecName: "utilities") pod "d1f50723-ab88-4c34-b3ad-099eeffd62c4" (UID: "d1f50723-ab88-4c34-b3ad-099eeffd62c4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.826861 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81795d8f-af22-4a16-92de-455b31623c53-catalog-content\") pod \"81795d8f-af22-4a16-92de-455b31623c53\" (UID: \"81795d8f-af22-4a16-92de-455b31623c53\") " Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.826917 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81795d8f-af22-4a16-92de-455b31623c53-utilities\") pod \"81795d8f-af22-4a16-92de-455b31623c53\" (UID: \"81795d8f-af22-4a16-92de-455b31623c53\") " Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.826941 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlkx2\" (UniqueName: \"kubernetes.io/projected/1112a12e-84b7-447f-8c3c-a2bca7fc2096-kube-api-access-nlkx2\") pod \"1112a12e-84b7-447f-8c3c-a2bca7fc2096\" (UID: \"1112a12e-84b7-447f-8c3c-a2bca7fc2096\") " Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.826966 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sgf8x\" (UniqueName: \"kubernetes.io/projected/81795d8f-af22-4a16-92de-455b31623c53-kube-api-access-sgf8x\") pod \"81795d8f-af22-4a16-92de-455b31623c53\" (UID: \"81795d8f-af22-4a16-92de-455b31623c53\") " Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.827035 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-marketplace-trusted-ca\") pod \"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae\" (UID: \"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae\") " Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.827062 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-marketplace-operator-metrics\") pod \"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae\" (UID: \"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae\") " Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.827088 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1112a12e-84b7-447f-8c3c-a2bca7fc2096-utilities\") pod \"1112a12e-84b7-447f-8c3c-a2bca7fc2096\" (UID: \"1112a12e-84b7-447f-8c3c-a2bca7fc2096\") " Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.827110 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1112a12e-84b7-447f-8c3c-a2bca7fc2096-catalog-content\") 
pod \"1112a12e-84b7-447f-8c3c-a2bca7fc2096\" (UID: \"1112a12e-84b7-447f-8c3c-a2bca7fc2096\") " Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.827127 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bh4l2\" (UniqueName: \"kubernetes.io/projected/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-kube-api-access-bh4l2\") pod \"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae\" (UID: \"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae\") " Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.827771 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1f50723-ab88-4c34-b3ad-099eeffd62c4-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.828195 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "8dc5d9d6-b64d-494d-a6e6-917ed40c01ae" (UID: "8dc5d9d6-b64d-494d-a6e6-917ed40c01ae"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.828484 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81795d8f-af22-4a16-92de-455b31623c53-utilities" (OuterVolumeSpecName: "utilities") pod "81795d8f-af22-4a16-92de-455b31623c53" (UID: "81795d8f-af22-4a16-92de-455b31623c53"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.828587 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1112a12e-84b7-447f-8c3c-a2bca7fc2096-utilities" (OuterVolumeSpecName: "utilities") pod "1112a12e-84b7-447f-8c3c-a2bca7fc2096" (UID: "1112a12e-84b7-447f-8c3c-a2bca7fc2096"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.831470 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-kube-api-access-bh4l2" (OuterVolumeSpecName: "kube-api-access-bh4l2") pod "8dc5d9d6-b64d-494d-a6e6-917ed40c01ae" (UID: "8dc5d9d6-b64d-494d-a6e6-917ed40c01ae"). InnerVolumeSpecName "kube-api-access-bh4l2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.831741 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "8dc5d9d6-b64d-494d-a6e6-917ed40c01ae" (UID: "8dc5d9d6-b64d-494d-a6e6-917ed40c01ae"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.834920 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1112a12e-84b7-447f-8c3c-a2bca7fc2096-kube-api-access-nlkx2" (OuterVolumeSpecName: "kube-api-access-nlkx2") pod "1112a12e-84b7-447f-8c3c-a2bca7fc2096" (UID: "1112a12e-84b7-447f-8c3c-a2bca7fc2096"). InnerVolumeSpecName "kube-api-access-nlkx2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.841726 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rxnhw" event={"ID":"2bf5f48d-6c07-40f7-8f84-3762251f1d1d","Type":"ContainerStarted","Data":"eb3145b23d3a22c90706313039c3f817fe8a4aefdf42d09692eaa83800ac3a46"} Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.841971 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rxnhw" podUID="2bf5f48d-6c07-40f7-8f84-3762251f1d1d" containerName="extract-content" containerID="cri-o://eb3145b23d3a22c90706313039c3f817fe8a4aefdf42d09692eaa83800ac3a46" gracePeriod=30 Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.845493 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81795d8f-af22-4a16-92de-455b31623c53-kube-api-access-sgf8x" (OuterVolumeSpecName: "kube-api-access-sgf8x") pod "81795d8f-af22-4a16-92de-455b31623c53" (UID: "81795d8f-af22-4a16-92de-455b31623c53"). InnerVolumeSpecName "kube-api-access-sgf8x". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.850631 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-q4t54" podUID="5433ad4c-65f7-4ec1-8a01-f358f14685c4" containerName="extract-content" containerID="cri-o://e0e92bdb25116e1a39b2fd2f795d6e6b630c1fbb8d63b7efedf2193485c303d3" gracePeriod=30 Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.850718 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q4t54" event={"ID":"5433ad4c-65f7-4ec1-8a01-f358f14685c4","Type":"ContainerStarted","Data":"e0e92bdb25116e1a39b2fd2f795d6e6b630c1fbb8d63b7efedf2193485c303d3"} Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.860204 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1f50723-ab88-4c34-b3ad-099eeffd62c4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d1f50723-ab88-4c34-b3ad-099eeffd62c4" (UID: "d1f50723-ab88-4c34-b3ad-099eeffd62c4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.860606 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-46vfl" event={"ID":"bcd451b4-9860-4079-9d8a-eab5bb3365c8","Type":"ContainerStarted","Data":"9dbbdd7feb41d97da07f7f4f60bccecb75d4612c5ba6a140eaac44e9ca1d41be"} Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.860770 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-46vfl" podUID="bcd451b4-9860-4079-9d8a-eab5bb3365c8" containerName="extract-content" containerID="cri-o://9dbbdd7feb41d97da07f7f4f60bccecb75d4612c5ba6a140eaac44e9ca1d41be" gracePeriod=30 Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.867494 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gpmrb" event={"ID":"5d19974b-baa0-46b1-b1ba-24411f62a0c0","Type":"ContainerStarted","Data":"8f2de2c08a0bfb5ca7fbee08f074dd1bb31a45efefd5911fa606fa9313ab754e"} Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.867724 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-gpmrb" podUID="5d19974b-baa0-46b1-b1ba-24411f62a0c0" containerName="extract-content" containerID="cri-o://8f2de2c08a0bfb5ca7fbee08f074dd1bb31a45efefd5911fa606fa9313ab754e" gracePeriod=30 Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.873076 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gt5pp" event={"ID":"054e2f3b-99d6-4586-8f79-bb4dadd92742","Type":"ContainerDied","Data":"8b21081b2e2ebac1f979d6181e46060858c62362c866252d066801be4f23fa16"} Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.873110 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gt5pp" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.873133 4881 scope.go:117] "RemoveContainer" containerID="336e626f3736a7b89a769f26b44fd9bc0b3955de6aea3fdaae2ed83a6827543c" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.879158 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1112a12e-84b7-447f-8c3c-a2bca7fc2096-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1112a12e-84b7-447f-8c3c-a2bca7fc2096" (UID: "1112a12e-84b7-447f-8c3c-a2bca7fc2096"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.881898 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7zkc" event={"ID":"81795d8f-af22-4a16-92de-455b31623c53","Type":"ContainerDied","Data":"16b8e5d358ce41e4e664a5d3faba09a3f5ef0d158c2f6f0eb86c760afe724cc5"} Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.882031 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q7zkc" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.883864 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.883884 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2n9k5" event={"ID":"8dc5d9d6-b64d-494d-a6e6-917ed40c01ae","Type":"ContainerDied","Data":"36a1d3860b1d213616d461a6d73e90db85c93508eb35e714b061c4fbafbcf49a"} Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.885730 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j8gln" event={"ID":"1112a12e-84b7-447f-8c3c-a2bca7fc2096","Type":"ContainerDied","Data":"6d194f80a882b19724bccc2483a0a98e7027463c1cb7ef79f9a8a93e10775179"} Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.885788 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j8gln" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.890263 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" event={"ID":"a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf","Type":"ContainerStarted","Data":"5d23b2e5ed4242ed3de5ed040df26ead0bf521437f6fc93920df702db4f2fa35"} Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.890889 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.891396 4881 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-tbhw6 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.56:8080/healthz\": dial tcp 10.217.0.56:8080: connect: connection refused" start-of-body= Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.891441 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" podUID="a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.56:8080/healthz\": dial tcp 10.217.0.56:8080: connect: connection refused" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.892392 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-58jkm" event={"ID":"d1f50723-ab88-4c34-b3ad-099eeffd62c4","Type":"ContainerDied","Data":"6dee1a8184e47add6a265582f6bdfd6a8a5744e23435b7cb2995605a03990e23"} Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.892478 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-58jkm" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.907001 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81795d8f-af22-4a16-92de-455b31623c53-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "81795d8f-af22-4a16-92de-455b31623c53" (UID: "81795d8f-af22-4a16-92de-455b31623c53"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.928371 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1f50723-ab88-4c34-b3ad-099eeffd62c4-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.928411 4881 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.928426 4881 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.928437 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1112a12e-84b7-447f-8c3c-a2bca7fc2096-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.928450 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1112a12e-84b7-447f-8c3c-a2bca7fc2096-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.928461 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bh4l2\" (UniqueName: \"kubernetes.io/projected/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae-kube-api-access-bh4l2\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.928472 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81795d8f-af22-4a16-92de-455b31623c53-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.928482 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81795d8f-af22-4a16-92de-455b31623c53-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.928492 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlkx2\" (UniqueName: \"kubernetes.io/projected/1112a12e-84b7-447f-8c3c-a2bca7fc2096-kube-api-access-nlkx2\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.928503 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sgf8x\" (UniqueName: \"kubernetes.io/projected/81795d8f-af22-4a16-92de-455b31623c53-kube-api-access-sgf8x\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:08 crc kubenswrapper[4881]: I1211 08:20:08.940666 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" podStartSLOduration=4.940628152 podStartE2EDuration="4.940628152s" podCreationTimestamp="2025-12-11 08:20:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:20:08.936708556 +0000 UTC m=+257.314077253" watchObservedRunningTime="2025-12-11 08:20:08.940628152 +0000 UTC m=+257.317996849" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.146575 4881 scope.go:117] "RemoveContainer" 
containerID="7520f203c7ba410ef2e5667c20cfd7e0fd58b0ff7810d85055ad5e964a38deb0" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.195324 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-q4t54_5433ad4c-65f7-4ec1-8a01-f358f14685c4/extract-content/0.log" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.196166 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q4t54" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.199165 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gt5pp"] Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.200364 4881 scope.go:117] "RemoveContainer" containerID="d35046069eb6edb40a3c626534d5af76896bd1cb9e1f5736a77427243906f9bb" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.201691 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gt5pp"] Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.212018 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-58jkm"] Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.237455 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-58jkm"] Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.241514 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q7zkc"] Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.242542 4881 scope.go:117] "RemoveContainer" containerID="d86991f7dff9ca353a54d0670f26a4712db17ebcc98fac2546a845b01b0d7fa7" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.243906 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-q7zkc"] Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.253305 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j8gln"] Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.259483 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-j8gln"] Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.264066 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2n9k5"] Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.269012 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2n9k5"] Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.272689 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rxnhw_2bf5f48d-6c07-40f7-8f84-3762251f1d1d/extract-content/0.log" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.273104 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rxnhw" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.274983 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-46vfl_bcd451b4-9860-4079-9d8a-eab5bb3365c8/extract-content/0.log" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.275875 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-46vfl" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.277500 4881 scope.go:117] "RemoveContainer" containerID="5eedd1abe3c37534822d496c3350db07cbfe97b55dff295a6cd3ee6ac8282e38" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.338950 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rldfw\" (UniqueName: \"kubernetes.io/projected/5433ad4c-65f7-4ec1-8a01-f358f14685c4-kube-api-access-rldfw\") pod \"5433ad4c-65f7-4ec1-8a01-f358f14685c4\" (UID: \"5433ad4c-65f7-4ec1-8a01-f358f14685c4\") " Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.339005 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5433ad4c-65f7-4ec1-8a01-f358f14685c4-utilities\") pod \"5433ad4c-65f7-4ec1-8a01-f358f14685c4\" (UID: \"5433ad4c-65f7-4ec1-8a01-f358f14685c4\") " Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.339044 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bf5f48d-6c07-40f7-8f84-3762251f1d1d-catalog-content\") pod \"2bf5f48d-6c07-40f7-8f84-3762251f1d1d\" (UID: \"2bf5f48d-6c07-40f7-8f84-3762251f1d1d\") " Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.339081 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5433ad4c-65f7-4ec1-8a01-f358f14685c4-catalog-content\") pod \"5433ad4c-65f7-4ec1-8a01-f358f14685c4\" (UID: \"5433ad4c-65f7-4ec1-8a01-f358f14685c4\") " Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.339109 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcd451b4-9860-4079-9d8a-eab5bb3365c8-utilities\") pod \"bcd451b4-9860-4079-9d8a-eab5bb3365c8\" (UID: \"bcd451b4-9860-4079-9d8a-eab5bb3365c8\") " Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.339142 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxp8c\" (UniqueName: \"kubernetes.io/projected/bcd451b4-9860-4079-9d8a-eab5bb3365c8-kube-api-access-bxp8c\") pod \"bcd451b4-9860-4079-9d8a-eab5bb3365c8\" (UID: \"bcd451b4-9860-4079-9d8a-eab5bb3365c8\") " Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.339174 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcd451b4-9860-4079-9d8a-eab5bb3365c8-catalog-content\") pod \"bcd451b4-9860-4079-9d8a-eab5bb3365c8\" (UID: \"bcd451b4-9860-4079-9d8a-eab5bb3365c8\") " Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.339196 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bf5f48d-6c07-40f7-8f84-3762251f1d1d-utilities\") pod \"2bf5f48d-6c07-40f7-8f84-3762251f1d1d\" (UID: \"2bf5f48d-6c07-40f7-8f84-3762251f1d1d\") " Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.339220 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgmrn\" (UniqueName: \"kubernetes.io/projected/2bf5f48d-6c07-40f7-8f84-3762251f1d1d-kube-api-access-fgmrn\") pod \"2bf5f48d-6c07-40f7-8f84-3762251f1d1d\" (UID: \"2bf5f48d-6c07-40f7-8f84-3762251f1d1d\") " Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.342227 4881 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5433ad4c-65f7-4ec1-8a01-f358f14685c4-utilities" (OuterVolumeSpecName: "utilities") pod "5433ad4c-65f7-4ec1-8a01-f358f14685c4" (UID: "5433ad4c-65f7-4ec1-8a01-f358f14685c4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.342972 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bcd451b4-9860-4079-9d8a-eab5bb3365c8-utilities" (OuterVolumeSpecName: "utilities") pod "bcd451b4-9860-4079-9d8a-eab5bb3365c8" (UID: "bcd451b4-9860-4079-9d8a-eab5bb3365c8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.343463 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bf5f48d-6c07-40f7-8f84-3762251f1d1d-utilities" (OuterVolumeSpecName: "utilities") pod "2bf5f48d-6c07-40f7-8f84-3762251f1d1d" (UID: "2bf5f48d-6c07-40f7-8f84-3762251f1d1d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.345482 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bf5f48d-6c07-40f7-8f84-3762251f1d1d-kube-api-access-fgmrn" (OuterVolumeSpecName: "kube-api-access-fgmrn") pod "2bf5f48d-6c07-40f7-8f84-3762251f1d1d" (UID: "2bf5f48d-6c07-40f7-8f84-3762251f1d1d"). InnerVolumeSpecName "kube-api-access-fgmrn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.345531 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5433ad4c-65f7-4ec1-8a01-f358f14685c4-kube-api-access-rldfw" (OuterVolumeSpecName: "kube-api-access-rldfw") pod "5433ad4c-65f7-4ec1-8a01-f358f14685c4" (UID: "5433ad4c-65f7-4ec1-8a01-f358f14685c4"). InnerVolumeSpecName "kube-api-access-rldfw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.349989 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcd451b4-9860-4079-9d8a-eab5bb3365c8-kube-api-access-bxp8c" (OuterVolumeSpecName: "kube-api-access-bxp8c") pod "bcd451b4-9860-4079-9d8a-eab5bb3365c8" (UID: "bcd451b4-9860-4079-9d8a-eab5bb3365c8"). InnerVolumeSpecName "kube-api-access-bxp8c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.378836 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-gpmrb_5d19974b-baa0-46b1-b1ba-24411f62a0c0/extract-content/0.log" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.379370 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gpmrb" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.387880 4881 scope.go:117] "RemoveContainer" containerID="6aa6814d9038dbe3337f5b3c8d612a80a02d4e62fe66535bed54ab966f1544df" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.396541 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5433ad4c-65f7-4ec1-8a01-f358f14685c4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5433ad4c-65f7-4ec1-8a01-f358f14685c4" (UID: "5433ad4c-65f7-4ec1-8a01-f358f14685c4"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.400513 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bcd451b4-9860-4079-9d8a-eab5bb3365c8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bcd451b4-9860-4079-9d8a-eab5bb3365c8" (UID: "bcd451b4-9860-4079-9d8a-eab5bb3365c8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.403298 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bf5f48d-6c07-40f7-8f84-3762251f1d1d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2bf5f48d-6c07-40f7-8f84-3762251f1d1d" (UID: "2bf5f48d-6c07-40f7-8f84-3762251f1d1d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.405960 4881 scope.go:117] "RemoveContainer" containerID="ccde261349a9cdfe045229c11857866097d56efdcf65bf7a367ca36cc7d89aee" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.418277 4881 scope.go:117] "RemoveContainer" containerID="6bfe59659d969cfe52cd90134c4e6d7ae4e90d60d62672738636e658a6e32f00" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.431480 4881 scope.go:117] "RemoveContainer" containerID="897600ec153ed64a5e11c3b0840f94560c724714a837d40e34ee3994756316e6" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.440993 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d19974b-baa0-46b1-b1ba-24411f62a0c0-catalog-content\") pod \"5d19974b-baa0-46b1-b1ba-24411f62a0c0\" (UID: \"5d19974b-baa0-46b1-b1ba-24411f62a0c0\") " Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.441054 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d19974b-baa0-46b1-b1ba-24411f62a0c0-utilities\") pod \"5d19974b-baa0-46b1-b1ba-24411f62a0c0\" (UID: \"5d19974b-baa0-46b1-b1ba-24411f62a0c0\") " Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.441122 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m57fs\" (UniqueName: \"kubernetes.io/projected/5d19974b-baa0-46b1-b1ba-24411f62a0c0-kube-api-access-m57fs\") pod \"5d19974b-baa0-46b1-b1ba-24411f62a0c0\" (UID: \"5d19974b-baa0-46b1-b1ba-24411f62a0c0\") " Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.441428 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rldfw\" (UniqueName: \"kubernetes.io/projected/5433ad4c-65f7-4ec1-8a01-f358f14685c4-kube-api-access-rldfw\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.441441 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5433ad4c-65f7-4ec1-8a01-f358f14685c4-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.441451 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bf5f48d-6c07-40f7-8f84-3762251f1d1d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.441458 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/5433ad4c-65f7-4ec1-8a01-f358f14685c4-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.441467 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcd451b4-9860-4079-9d8a-eab5bb3365c8-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.441475 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxp8c\" (UniqueName: \"kubernetes.io/projected/bcd451b4-9860-4079-9d8a-eab5bb3365c8-kube-api-access-bxp8c\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.441482 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcd451b4-9860-4079-9d8a-eab5bb3365c8-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.441492 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bf5f48d-6c07-40f7-8f84-3762251f1d1d-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.441499 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgmrn\" (UniqueName: \"kubernetes.io/projected/2bf5f48d-6c07-40f7-8f84-3762251f1d1d-kube-api-access-fgmrn\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.444524 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d19974b-baa0-46b1-b1ba-24411f62a0c0-kube-api-access-m57fs" (OuterVolumeSpecName: "kube-api-access-m57fs") pod "5d19974b-baa0-46b1-b1ba-24411f62a0c0" (UID: "5d19974b-baa0-46b1-b1ba-24411f62a0c0"). InnerVolumeSpecName "kube-api-access-m57fs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.446478 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d19974b-baa0-46b1-b1ba-24411f62a0c0-utilities" (OuterVolumeSpecName: "utilities") pod "5d19974b-baa0-46b1-b1ba-24411f62a0c0" (UID: "5d19974b-baa0-46b1-b1ba-24411f62a0c0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.447822 4881 scope.go:117] "RemoveContainer" containerID="a957612b7d9746c7605f9dc5db8e5073b363f0a4154f831ea8ad796aaef4321f" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.465762 4881 scope.go:117] "RemoveContainer" containerID="5321d6fd3b8a976d89b579ce3fd92b3587e1a4ba0238f3077c4413a9f40bb666" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.482718 4881 scope.go:117] "RemoveContainer" containerID="9490ecdcae64d37198f4b34b6c09acc48cc5fdc04dd712c79de664985be1a9bc" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.492915 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d19974b-baa0-46b1-b1ba-24411f62a0c0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5d19974b-baa0-46b1-b1ba-24411f62a0c0" (UID: "5d19974b-baa0-46b1-b1ba-24411f62a0c0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.500960 4881 scope.go:117] "RemoveContainer" containerID="1130696a293d25015fc70a0fff74d70b650e5e4941a327c52dcd8f118c0abfc7" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.542918 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d19974b-baa0-46b1-b1ba-24411f62a0c0-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.542955 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d19974b-baa0-46b1-b1ba-24411f62a0c0-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.542967 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m57fs\" (UniqueName: \"kubernetes.io/projected/5d19974b-baa0-46b1-b1ba-24411f62a0c0-kube-api-access-m57fs\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.903481 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-gpmrb_5d19974b-baa0-46b1-b1ba-24411f62a0c0/extract-content/0.log" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.904005 4881 generic.go:334] "Generic (PLEG): container finished" podID="5d19974b-baa0-46b1-b1ba-24411f62a0c0" containerID="8f2de2c08a0bfb5ca7fbee08f074dd1bb31a45efefd5911fa606fa9313ab754e" exitCode=2 Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.904059 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gpmrb" event={"ID":"5d19974b-baa0-46b1-b1ba-24411f62a0c0","Type":"ContainerDied","Data":"8f2de2c08a0bfb5ca7fbee08f074dd1bb31a45efefd5911fa606fa9313ab754e"} Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.904117 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gpmrb" event={"ID":"5d19974b-baa0-46b1-b1ba-24411f62a0c0","Type":"ContainerDied","Data":"1d4aea663f5827b2b13a3a4cc84955d33886f5755c349fab0abe4bd7c61c313a"} Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.904138 4881 scope.go:117] "RemoveContainer" containerID="8f2de2c08a0bfb5ca7fbee08f074dd1bb31a45efefd5911fa606fa9313ab754e" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.904242 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gpmrb" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.919884 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" event={"ID":"a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf","Type":"ContainerStarted","Data":"f792b65425aa8e0d6e33043da9e324807b48fa9a42a8cf1582a7fc6d618698cc"} Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.924798 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.935578 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-46vfl_bcd451b4-9860-4079-9d8a-eab5bb3365c8/extract-content/0.log" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.937192 4881 generic.go:334] "Generic (PLEG): container finished" podID="bcd451b4-9860-4079-9d8a-eab5bb3365c8" containerID="9dbbdd7feb41d97da07f7f4f60bccecb75d4612c5ba6a140eaac44e9ca1d41be" exitCode=2 Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.937568 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-46vfl" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.937803 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-46vfl" event={"ID":"bcd451b4-9860-4079-9d8a-eab5bb3365c8","Type":"ContainerDied","Data":"9dbbdd7feb41d97da07f7f4f60bccecb75d4612c5ba6a140eaac44e9ca1d41be"} Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.937874 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-46vfl" event={"ID":"bcd451b4-9860-4079-9d8a-eab5bb3365c8","Type":"ContainerDied","Data":"365b5d344ed2fead5949ece16d9a28b945ee73667964fd51703bf7b4ec3918ea"} Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.954310 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rxnhw_2bf5f48d-6c07-40f7-8f84-3762251f1d1d/extract-content/0.log" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.955008 4881 generic.go:334] "Generic (PLEG): container finished" podID="2bf5f48d-6c07-40f7-8f84-3762251f1d1d" containerID="eb3145b23d3a22c90706313039c3f817fe8a4aefdf42d09692eaa83800ac3a46" exitCode=2 Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.955062 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rxnhw" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.955099 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rxnhw" event={"ID":"2bf5f48d-6c07-40f7-8f84-3762251f1d1d","Type":"ContainerDied","Data":"eb3145b23d3a22c90706313039c3f817fe8a4aefdf42d09692eaa83800ac3a46"} Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.955158 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rxnhw" event={"ID":"2bf5f48d-6c07-40f7-8f84-3762251f1d1d","Type":"ContainerDied","Data":"1c4a12c799a042b030d39ac21d0828dc69e760c0f990b35e6c57c6a4e88df14e"} Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.958001 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-q4t54_5433ad4c-65f7-4ec1-8a01-f358f14685c4/extract-content/0.log" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.958726 4881 generic.go:334] "Generic (PLEG): container finished" podID="5433ad4c-65f7-4ec1-8a01-f358f14685c4" containerID="e0e92bdb25116e1a39b2fd2f795d6e6b630c1fbb8d63b7efedf2193485c303d3" exitCode=2 Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.958820 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q4t54" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.958829 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q4t54" event={"ID":"5433ad4c-65f7-4ec1-8a01-f358f14685c4","Type":"ContainerDied","Data":"e0e92bdb25116e1a39b2fd2f795d6e6b630c1fbb8d63b7efedf2193485c303d3"} Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.958860 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q4t54" event={"ID":"5433ad4c-65f7-4ec1-8a01-f358f14685c4","Type":"ContainerDied","Data":"570286220aeadb218374f797d6cc5651539f3eda419c458fc3e7d8dceb769840"} Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.980908 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gpmrb"] Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.980948 4881 scope.go:117] "RemoveContainer" containerID="a326aa201312b4fabea8b77148ca66f4d59bca4460992165e7a09c88658301de" Dec 11 08:20:09 crc kubenswrapper[4881]: I1211 08:20:09.987126 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-gpmrb"] Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.038576 4881 scope.go:117] "RemoveContainer" containerID="8f2de2c08a0bfb5ca7fbee08f074dd1bb31a45efefd5911fa606fa9313ab754e" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.039216 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f2de2c08a0bfb5ca7fbee08f074dd1bb31a45efefd5911fa606fa9313ab754e\": container with ID starting with 8f2de2c08a0bfb5ca7fbee08f074dd1bb31a45efefd5911fa606fa9313ab754e not found: ID does not exist" containerID="8f2de2c08a0bfb5ca7fbee08f074dd1bb31a45efefd5911fa606fa9313ab754e" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.039251 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f2de2c08a0bfb5ca7fbee08f074dd1bb31a45efefd5911fa606fa9313ab754e"} err="failed to get container status 
\"8f2de2c08a0bfb5ca7fbee08f074dd1bb31a45efefd5911fa606fa9313ab754e\": rpc error: code = NotFound desc = could not find container \"8f2de2c08a0bfb5ca7fbee08f074dd1bb31a45efefd5911fa606fa9313ab754e\": container with ID starting with 8f2de2c08a0bfb5ca7fbee08f074dd1bb31a45efefd5911fa606fa9313ab754e not found: ID does not exist" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.039277 4881 scope.go:117] "RemoveContainer" containerID="a326aa201312b4fabea8b77148ca66f4d59bca4460992165e7a09c88658301de" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.039551 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a326aa201312b4fabea8b77148ca66f4d59bca4460992165e7a09c88658301de\": container with ID starting with a326aa201312b4fabea8b77148ca66f4d59bca4460992165e7a09c88658301de not found: ID does not exist" containerID="a326aa201312b4fabea8b77148ca66f4d59bca4460992165e7a09c88658301de" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.039575 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a326aa201312b4fabea8b77148ca66f4d59bca4460992165e7a09c88658301de"} err="failed to get container status \"a326aa201312b4fabea8b77148ca66f4d59bca4460992165e7a09c88658301de\": rpc error: code = NotFound desc = could not find container \"a326aa201312b4fabea8b77148ca66f4d59bca4460992165e7a09c88658301de\": container with ID starting with a326aa201312b4fabea8b77148ca66f4d59bca4460992165e7a09c88658301de not found: ID does not exist" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.039591 4881 scope.go:117] "RemoveContainer" containerID="9dbbdd7feb41d97da07f7f4f60bccecb75d4612c5ba6a140eaac44e9ca1d41be" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.050417 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rxnhw"] Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.053808 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rxnhw"] Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.062706 4881 scope.go:117] "RemoveContainer" containerID="559460f96451f5ce04aea6564d4369de1e45a11f8b2c82801e11359dd0114167" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.085062 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-46vfl"] Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.085418 4881 scope.go:117] "RemoveContainer" containerID="9dbbdd7feb41d97da07f7f4f60bccecb75d4612c5ba6a140eaac44e9ca1d41be" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.085971 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9dbbdd7feb41d97da07f7f4f60bccecb75d4612c5ba6a140eaac44e9ca1d41be\": container with ID starting with 9dbbdd7feb41d97da07f7f4f60bccecb75d4612c5ba6a140eaac44e9ca1d41be not found: ID does not exist" containerID="9dbbdd7feb41d97da07f7f4f60bccecb75d4612c5ba6a140eaac44e9ca1d41be" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.086002 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9dbbdd7feb41d97da07f7f4f60bccecb75d4612c5ba6a140eaac44e9ca1d41be"} err="failed to get container status \"9dbbdd7feb41d97da07f7f4f60bccecb75d4612c5ba6a140eaac44e9ca1d41be\": rpc error: code = NotFound desc = could not find container \"9dbbdd7feb41d97da07f7f4f60bccecb75d4612c5ba6a140eaac44e9ca1d41be\": 
container with ID starting with 9dbbdd7feb41d97da07f7f4f60bccecb75d4612c5ba6a140eaac44e9ca1d41be not found: ID does not exist" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.086027 4881 scope.go:117] "RemoveContainer" containerID="559460f96451f5ce04aea6564d4369de1e45a11f8b2c82801e11359dd0114167" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.086314 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"559460f96451f5ce04aea6564d4369de1e45a11f8b2c82801e11359dd0114167\": container with ID starting with 559460f96451f5ce04aea6564d4369de1e45a11f8b2c82801e11359dd0114167 not found: ID does not exist" containerID="559460f96451f5ce04aea6564d4369de1e45a11f8b2c82801e11359dd0114167" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.086344 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"559460f96451f5ce04aea6564d4369de1e45a11f8b2c82801e11359dd0114167"} err="failed to get container status \"559460f96451f5ce04aea6564d4369de1e45a11f8b2c82801e11359dd0114167\": rpc error: code = NotFound desc = could not find container \"559460f96451f5ce04aea6564d4369de1e45a11f8b2c82801e11359dd0114167\": container with ID starting with 559460f96451f5ce04aea6564d4369de1e45a11f8b2c82801e11359dd0114167 not found: ID does not exist" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.086356 4881 scope.go:117] "RemoveContainer" containerID="eb3145b23d3a22c90706313039c3f817fe8a4aefdf42d09692eaa83800ac3a46" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.089545 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-46vfl"] Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.106795 4881 scope.go:117] "RemoveContainer" containerID="929789db30077f0eb0ce126eb142540875c036c0d5b5c1300b4cac126086e838" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.109980 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q4t54"] Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.112016 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-q4t54"] Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.122580 4881 scope.go:117] "RemoveContainer" containerID="eb3145b23d3a22c90706313039c3f817fe8a4aefdf42d09692eaa83800ac3a46" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.122993 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb3145b23d3a22c90706313039c3f817fe8a4aefdf42d09692eaa83800ac3a46\": container with ID starting with eb3145b23d3a22c90706313039c3f817fe8a4aefdf42d09692eaa83800ac3a46 not found: ID does not exist" containerID="eb3145b23d3a22c90706313039c3f817fe8a4aefdf42d09692eaa83800ac3a46" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.123033 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb3145b23d3a22c90706313039c3f817fe8a4aefdf42d09692eaa83800ac3a46"} err="failed to get container status \"eb3145b23d3a22c90706313039c3f817fe8a4aefdf42d09692eaa83800ac3a46\": rpc error: code = NotFound desc = could not find container \"eb3145b23d3a22c90706313039c3f817fe8a4aefdf42d09692eaa83800ac3a46\": container with ID starting with eb3145b23d3a22c90706313039c3f817fe8a4aefdf42d09692eaa83800ac3a46 not found: ID does not exist" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.123081 4881 
scope.go:117] "RemoveContainer" containerID="929789db30077f0eb0ce126eb142540875c036c0d5b5c1300b4cac126086e838" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.123399 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"929789db30077f0eb0ce126eb142540875c036c0d5b5c1300b4cac126086e838\": container with ID starting with 929789db30077f0eb0ce126eb142540875c036c0d5b5c1300b4cac126086e838 not found: ID does not exist" containerID="929789db30077f0eb0ce126eb142540875c036c0d5b5c1300b4cac126086e838" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.123466 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"929789db30077f0eb0ce126eb142540875c036c0d5b5c1300b4cac126086e838"} err="failed to get container status \"929789db30077f0eb0ce126eb142540875c036c0d5b5c1300b4cac126086e838\": rpc error: code = NotFound desc = could not find container \"929789db30077f0eb0ce126eb142540875c036c0d5b5c1300b4cac126086e838\": container with ID starting with 929789db30077f0eb0ce126eb142540875c036c0d5b5c1300b4cac126086e838 not found: ID does not exist" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.123480 4881 scope.go:117] "RemoveContainer" containerID="e0e92bdb25116e1a39b2fd2f795d6e6b630c1fbb8d63b7efedf2193485c303d3" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.139648 4881 scope.go:117] "RemoveContainer" containerID="bdbf2d0c70c3492727076c85071a800b9bba83c72833d54d76bb7ffe0135f725" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.156636 4881 scope.go:117] "RemoveContainer" containerID="e0e92bdb25116e1a39b2fd2f795d6e6b630c1fbb8d63b7efedf2193485c303d3" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.157781 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0e92bdb25116e1a39b2fd2f795d6e6b630c1fbb8d63b7efedf2193485c303d3\": container with ID starting with e0e92bdb25116e1a39b2fd2f795d6e6b630c1fbb8d63b7efedf2193485c303d3 not found: ID does not exist" containerID="e0e92bdb25116e1a39b2fd2f795d6e6b630c1fbb8d63b7efedf2193485c303d3" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.157942 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0e92bdb25116e1a39b2fd2f795d6e6b630c1fbb8d63b7efedf2193485c303d3"} err="failed to get container status \"e0e92bdb25116e1a39b2fd2f795d6e6b630c1fbb8d63b7efedf2193485c303d3\": rpc error: code = NotFound desc = could not find container \"e0e92bdb25116e1a39b2fd2f795d6e6b630c1fbb8d63b7efedf2193485c303d3\": container with ID starting with e0e92bdb25116e1a39b2fd2f795d6e6b630c1fbb8d63b7efedf2193485c303d3 not found: ID does not exist" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.158090 4881 scope.go:117] "RemoveContainer" containerID="bdbf2d0c70c3492727076c85071a800b9bba83c72833d54d76bb7ffe0135f725" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.158539 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdbf2d0c70c3492727076c85071a800b9bba83c72833d54d76bb7ffe0135f725\": container with ID starting with bdbf2d0c70c3492727076c85071a800b9bba83c72833d54d76bb7ffe0135f725 not found: ID does not exist" containerID="bdbf2d0c70c3492727076c85071a800b9bba83c72833d54d76bb7ffe0135f725" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.158586 4881 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"bdbf2d0c70c3492727076c85071a800b9bba83c72833d54d76bb7ffe0135f725"} err="failed to get container status \"bdbf2d0c70c3492727076c85071a800b9bba83c72833d54d76bb7ffe0135f725\": rpc error: code = NotFound desc = could not find container \"bdbf2d0c70c3492727076c85071a800b9bba83c72833d54d76bb7ffe0135f725\": container with ID starting with bdbf2d0c70c3492727076c85071a800b9bba83c72833d54d76bb7ffe0135f725 not found: ID does not exist" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.350862 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-74d77"] Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351086 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1112a12e-84b7-447f-8c3c-a2bca7fc2096" containerName="registry-server" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351100 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="1112a12e-84b7-447f-8c3c-a2bca7fc2096" containerName="registry-server" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351111 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bf5f48d-6c07-40f7-8f84-3762251f1d1d" containerName="extract-utilities" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351118 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bf5f48d-6c07-40f7-8f84-3762251f1d1d" containerName="extract-utilities" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351128 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bf5f48d-6c07-40f7-8f84-3762251f1d1d" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351136 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bf5f48d-6c07-40f7-8f84-3762251f1d1d" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351147 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d19974b-baa0-46b1-b1ba-24411f62a0c0" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351153 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d19974b-baa0-46b1-b1ba-24411f62a0c0" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351164 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dc5d9d6-b64d-494d-a6e6-917ed40c01ae" containerName="marketplace-operator" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351171 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dc5d9d6-b64d-494d-a6e6-917ed40c01ae" containerName="marketplace-operator" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351180 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="054e2f3b-99d6-4586-8f79-bb4dadd92742" containerName="extract-utilities" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351186 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="054e2f3b-99d6-4586-8f79-bb4dadd92742" containerName="extract-utilities" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351194 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81795d8f-af22-4a16-92de-455b31623c53" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351200 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="81795d8f-af22-4a16-92de-455b31623c53" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351209 4881 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="81795d8f-af22-4a16-92de-455b31623c53" containerName="registry-server" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351215 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="81795d8f-af22-4a16-92de-455b31623c53" containerName="registry-server" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351224 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcd451b4-9860-4079-9d8a-eab5bb3365c8" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351230 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcd451b4-9860-4079-9d8a-eab5bb3365c8" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351240 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5433ad4c-65f7-4ec1-8a01-f358f14685c4" containerName="extract-utilities" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351246 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="5433ad4c-65f7-4ec1-8a01-f358f14685c4" containerName="extract-utilities" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351252 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1f50723-ab88-4c34-b3ad-099eeffd62c4" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351260 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1f50723-ab88-4c34-b3ad-099eeffd62c4" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351267 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1112a12e-84b7-447f-8c3c-a2bca7fc2096" containerName="extract-utilities" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351275 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="1112a12e-84b7-447f-8c3c-a2bca7fc2096" containerName="extract-utilities" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351297 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d19974b-baa0-46b1-b1ba-24411f62a0c0" containerName="extract-utilities" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351303 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d19974b-baa0-46b1-b1ba-24411f62a0c0" containerName="extract-utilities" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351312 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1112a12e-84b7-447f-8c3c-a2bca7fc2096" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351318 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="1112a12e-84b7-447f-8c3c-a2bca7fc2096" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351327 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1f50723-ab88-4c34-b3ad-099eeffd62c4" containerName="registry-server" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351348 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1f50723-ab88-4c34-b3ad-099eeffd62c4" containerName="registry-server" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351357 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcd451b4-9860-4079-9d8a-eab5bb3365c8" containerName="extract-utilities" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351363 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcd451b4-9860-4079-9d8a-eab5bb3365c8" containerName="extract-utilities" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351371 4881 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="054e2f3b-99d6-4586-8f79-bb4dadd92742" containerName="registry-server" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351377 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="054e2f3b-99d6-4586-8f79-bb4dadd92742" containerName="registry-server" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351385 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5433ad4c-65f7-4ec1-8a01-f358f14685c4" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351391 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="5433ad4c-65f7-4ec1-8a01-f358f14685c4" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351399 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81795d8f-af22-4a16-92de-455b31623c53" containerName="extract-utilities" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351405 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="81795d8f-af22-4a16-92de-455b31623c53" containerName="extract-utilities" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351414 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="054e2f3b-99d6-4586-8f79-bb4dadd92742" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351421 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="054e2f3b-99d6-4586-8f79-bb4dadd92742" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: E1211 08:20:10.351432 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1f50723-ab88-4c34-b3ad-099eeffd62c4" containerName="extract-utilities" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351439 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1f50723-ab88-4c34-b3ad-099eeffd62c4" containerName="extract-utilities" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351542 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1f50723-ab88-4c34-b3ad-099eeffd62c4" containerName="registry-server" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351553 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="81795d8f-af22-4a16-92de-455b31623c53" containerName="registry-server" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351566 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bf5f48d-6c07-40f7-8f84-3762251f1d1d" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351573 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="054e2f3b-99d6-4586-8f79-bb4dadd92742" containerName="registry-server" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351583 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="5433ad4c-65f7-4ec1-8a01-f358f14685c4" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351594 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="1112a12e-84b7-447f-8c3c-a2bca7fc2096" containerName="registry-server" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351605 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d19974b-baa0-46b1-b1ba-24411f62a0c0" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351612 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="8dc5d9d6-b64d-494d-a6e6-917ed40c01ae" containerName="marketplace-operator" 
Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.351622 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcd451b4-9860-4079-9d8a-eab5bb3365c8" containerName="extract-content" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.352386 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-74d77" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.358459 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.365573 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-74d77"] Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.460310 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99c7c976-c996-4b9c-a389-fbaed3f71813-utilities\") pod \"community-operators-74d77\" (UID: \"99c7c976-c996-4b9c-a389-fbaed3f71813\") " pod="openshift-marketplace/community-operators-74d77" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.460611 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99c7c976-c996-4b9c-a389-fbaed3f71813-catalog-content\") pod \"community-operators-74d77\" (UID: \"99c7c976-c996-4b9c-a389-fbaed3f71813\") " pod="openshift-marketplace/community-operators-74d77" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.460670 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c762h\" (UniqueName: \"kubernetes.io/projected/99c7c976-c996-4b9c-a389-fbaed3f71813-kube-api-access-c762h\") pod \"community-operators-74d77\" (UID: \"99c7c976-c996-4b9c-a389-fbaed3f71813\") " pod="openshift-marketplace/community-operators-74d77" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.562374 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99c7c976-c996-4b9c-a389-fbaed3f71813-catalog-content\") pod \"community-operators-74d77\" (UID: \"99c7c976-c996-4b9c-a389-fbaed3f71813\") " pod="openshift-marketplace/community-operators-74d77" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.562435 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c762h\" (UniqueName: \"kubernetes.io/projected/99c7c976-c996-4b9c-a389-fbaed3f71813-kube-api-access-c762h\") pod \"community-operators-74d77\" (UID: \"99c7c976-c996-4b9c-a389-fbaed3f71813\") " pod="openshift-marketplace/community-operators-74d77" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.562469 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99c7c976-c996-4b9c-a389-fbaed3f71813-utilities\") pod \"community-operators-74d77\" (UID: \"99c7c976-c996-4b9c-a389-fbaed3f71813\") " pod="openshift-marketplace/community-operators-74d77" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.562953 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99c7c976-c996-4b9c-a389-fbaed3f71813-utilities\") pod \"community-operators-74d77\" (UID: \"99c7c976-c996-4b9c-a389-fbaed3f71813\") " 
pod="openshift-marketplace/community-operators-74d77" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.563217 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99c7c976-c996-4b9c-a389-fbaed3f71813-catalog-content\") pod \"community-operators-74d77\" (UID: \"99c7c976-c996-4b9c-a389-fbaed3f71813\") " pod="openshift-marketplace/community-operators-74d77" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.584638 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c762h\" (UniqueName: \"kubernetes.io/projected/99c7c976-c996-4b9c-a389-fbaed3f71813-kube-api-access-c762h\") pod \"community-operators-74d77\" (UID: \"99c7c976-c996-4b9c-a389-fbaed3f71813\") " pod="openshift-marketplace/community-operators-74d77" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.665687 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-74d77" Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.886700 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-74d77"] Dec 11 08:20:10 crc kubenswrapper[4881]: W1211 08:20:10.897691 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod99c7c976_c996_4b9c_a389_fbaed3f71813.slice/crio-d413efc388b920ce3776847e721235231909ab5db30b62e963244dce8957fdac WatchSource:0}: Error finding container d413efc388b920ce3776847e721235231909ab5db30b62e963244dce8957fdac: Status 404 returned error can't find the container with id d413efc388b920ce3776847e721235231909ab5db30b62e963244dce8957fdac Dec 11 08:20:10 crc kubenswrapper[4881]: I1211 08:20:10.971841 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-74d77" event={"ID":"99c7c976-c996-4b9c-a389-fbaed3f71813","Type":"ContainerStarted","Data":"d413efc388b920ce3776847e721235231909ab5db30b62e963244dce8957fdac"} Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.012988 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="054e2f3b-99d6-4586-8f79-bb4dadd92742" path="/var/lib/kubelet/pods/054e2f3b-99d6-4586-8f79-bb4dadd92742/volumes" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.014109 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1112a12e-84b7-447f-8c3c-a2bca7fc2096" path="/var/lib/kubelet/pods/1112a12e-84b7-447f-8c3c-a2bca7fc2096/volumes" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.014843 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2bf5f48d-6c07-40f7-8f84-3762251f1d1d" path="/var/lib/kubelet/pods/2bf5f48d-6c07-40f7-8f84-3762251f1d1d/volumes" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.016047 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5433ad4c-65f7-4ec1-8a01-f358f14685c4" path="/var/lib/kubelet/pods/5433ad4c-65f7-4ec1-8a01-f358f14685c4/volumes" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.016727 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d19974b-baa0-46b1-b1ba-24411f62a0c0" path="/var/lib/kubelet/pods/5d19974b-baa0-46b1-b1ba-24411f62a0c0/volumes" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.018167 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81795d8f-af22-4a16-92de-455b31623c53" 
path="/var/lib/kubelet/pods/81795d8f-af22-4a16-92de-455b31623c53/volumes" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.018940 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8dc5d9d6-b64d-494d-a6e6-917ed40c01ae" path="/var/lib/kubelet/pods/8dc5d9d6-b64d-494d-a6e6-917ed40c01ae/volumes" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.026516 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bcd451b4-9860-4079-9d8a-eab5bb3365c8" path="/var/lib/kubelet/pods/bcd451b4-9860-4079-9d8a-eab5bb3365c8/volumes" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.027194 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1f50723-ab88-4c34-b3ad-099eeffd62c4" path="/var/lib/kubelet/pods/d1f50723-ab88-4c34-b3ad-099eeffd62c4/volumes" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.356405 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zqwb5"] Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.358763 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zqwb5" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.363319 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.368622 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zqwb5"] Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.371058 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf59f823-b688-420e-9e5b-20f4441c9635-utilities\") pod \"certified-operators-zqwb5\" (UID: \"cf59f823-b688-420e-9e5b-20f4441c9635\") " pod="openshift-marketplace/certified-operators-zqwb5" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.371147 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf59f823-b688-420e-9e5b-20f4441c9635-catalog-content\") pod \"certified-operators-zqwb5\" (UID: \"cf59f823-b688-420e-9e5b-20f4441c9635\") " pod="openshift-marketplace/certified-operators-zqwb5" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.371281 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4pkf\" (UniqueName: \"kubernetes.io/projected/cf59f823-b688-420e-9e5b-20f4441c9635-kube-api-access-w4pkf\") pod \"certified-operators-zqwb5\" (UID: \"cf59f823-b688-420e-9e5b-20f4441c9635\") " pod="openshift-marketplace/certified-operators-zqwb5" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.473009 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf59f823-b688-420e-9e5b-20f4441c9635-catalog-content\") pod \"certified-operators-zqwb5\" (UID: \"cf59f823-b688-420e-9e5b-20f4441c9635\") " pod="openshift-marketplace/certified-operators-zqwb5" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.473106 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4pkf\" (UniqueName: \"kubernetes.io/projected/cf59f823-b688-420e-9e5b-20f4441c9635-kube-api-access-w4pkf\") pod \"certified-operators-zqwb5\" (UID: \"cf59f823-b688-420e-9e5b-20f4441c9635\") " 
pod="openshift-marketplace/certified-operators-zqwb5" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.473157 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf59f823-b688-420e-9e5b-20f4441c9635-utilities\") pod \"certified-operators-zqwb5\" (UID: \"cf59f823-b688-420e-9e5b-20f4441c9635\") " pod="openshift-marketplace/certified-operators-zqwb5" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.473730 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf59f823-b688-420e-9e5b-20f4441c9635-catalog-content\") pod \"certified-operators-zqwb5\" (UID: \"cf59f823-b688-420e-9e5b-20f4441c9635\") " pod="openshift-marketplace/certified-operators-zqwb5" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.473735 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf59f823-b688-420e-9e5b-20f4441c9635-utilities\") pod \"certified-operators-zqwb5\" (UID: \"cf59f823-b688-420e-9e5b-20f4441c9635\") " pod="openshift-marketplace/certified-operators-zqwb5" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.491259 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4pkf\" (UniqueName: \"kubernetes.io/projected/cf59f823-b688-420e-9e5b-20f4441c9635-kube-api-access-w4pkf\") pod \"certified-operators-zqwb5\" (UID: \"cf59f823-b688-420e-9e5b-20f4441c9635\") " pod="openshift-marketplace/certified-operators-zqwb5" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.680397 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zqwb5" Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.858311 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zqwb5"] Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.978807 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zqwb5" event={"ID":"cf59f823-b688-420e-9e5b-20f4441c9635","Type":"ContainerStarted","Data":"f369d688603915cfe07a9c76948634ab1ca7aae50ac50587fd5acc10f5ab3da4"} Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.980196 4881 generic.go:334] "Generic (PLEG): container finished" podID="99c7c976-c996-4b9c-a389-fbaed3f71813" containerID="6dd5d132b784d3186eace1f04b9c8bdf809f11d5763cfe20ec46f99469121fc9" exitCode=0 Dec 11 08:20:11 crc kubenswrapper[4881]: I1211 08:20:11.980256 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-74d77" event={"ID":"99c7c976-c996-4b9c-a389-fbaed3f71813","Type":"ContainerDied","Data":"6dd5d132b784d3186eace1f04b9c8bdf809f11d5763cfe20ec46f99469121fc9"} Dec 11 08:20:12 crc kubenswrapper[4881]: I1211 08:20:12.754486 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-j9mc4"] Dec 11 08:20:12 crc kubenswrapper[4881]: I1211 08:20:12.756850 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j9mc4" Dec 11 08:20:12 crc kubenswrapper[4881]: I1211 08:20:12.758703 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 11 08:20:12 crc kubenswrapper[4881]: I1211 08:20:12.768007 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j9mc4"] Dec 11 08:20:12 crc kubenswrapper[4881]: I1211 08:20:12.803411 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1b26f03-480c-45ab-b37e-c2971f8e117a-catalog-content\") pod \"redhat-marketplace-j9mc4\" (UID: \"e1b26f03-480c-45ab-b37e-c2971f8e117a\") " pod="openshift-marketplace/redhat-marketplace-j9mc4" Dec 11 08:20:12 crc kubenswrapper[4881]: I1211 08:20:12.803510 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1b26f03-480c-45ab-b37e-c2971f8e117a-utilities\") pod \"redhat-marketplace-j9mc4\" (UID: \"e1b26f03-480c-45ab-b37e-c2971f8e117a\") " pod="openshift-marketplace/redhat-marketplace-j9mc4" Dec 11 08:20:12 crc kubenswrapper[4881]: I1211 08:20:12.803547 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzlxx\" (UniqueName: \"kubernetes.io/projected/e1b26f03-480c-45ab-b37e-c2971f8e117a-kube-api-access-gzlxx\") pod \"redhat-marketplace-j9mc4\" (UID: \"e1b26f03-480c-45ab-b37e-c2971f8e117a\") " pod="openshift-marketplace/redhat-marketplace-j9mc4" Dec 11 08:20:12 crc kubenswrapper[4881]: I1211 08:20:12.905005 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzlxx\" (UniqueName: \"kubernetes.io/projected/e1b26f03-480c-45ab-b37e-c2971f8e117a-kube-api-access-gzlxx\") pod \"redhat-marketplace-j9mc4\" (UID: \"e1b26f03-480c-45ab-b37e-c2971f8e117a\") " pod="openshift-marketplace/redhat-marketplace-j9mc4" Dec 11 08:20:12 crc kubenswrapper[4881]: I1211 08:20:12.905063 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1b26f03-480c-45ab-b37e-c2971f8e117a-catalog-content\") pod \"redhat-marketplace-j9mc4\" (UID: \"e1b26f03-480c-45ab-b37e-c2971f8e117a\") " pod="openshift-marketplace/redhat-marketplace-j9mc4" Dec 11 08:20:12 crc kubenswrapper[4881]: I1211 08:20:12.905154 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1b26f03-480c-45ab-b37e-c2971f8e117a-utilities\") pod \"redhat-marketplace-j9mc4\" (UID: \"e1b26f03-480c-45ab-b37e-c2971f8e117a\") " pod="openshift-marketplace/redhat-marketplace-j9mc4" Dec 11 08:20:12 crc kubenswrapper[4881]: I1211 08:20:12.905645 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1b26f03-480c-45ab-b37e-c2971f8e117a-utilities\") pod \"redhat-marketplace-j9mc4\" (UID: \"e1b26f03-480c-45ab-b37e-c2971f8e117a\") " pod="openshift-marketplace/redhat-marketplace-j9mc4" Dec 11 08:20:12 crc kubenswrapper[4881]: I1211 08:20:12.905746 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1b26f03-480c-45ab-b37e-c2971f8e117a-catalog-content\") pod \"redhat-marketplace-j9mc4\" (UID: 
\"e1b26f03-480c-45ab-b37e-c2971f8e117a\") " pod="openshift-marketplace/redhat-marketplace-j9mc4" Dec 11 08:20:12 crc kubenswrapper[4881]: I1211 08:20:12.926195 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzlxx\" (UniqueName: \"kubernetes.io/projected/e1b26f03-480c-45ab-b37e-c2971f8e117a-kube-api-access-gzlxx\") pod \"redhat-marketplace-j9mc4\" (UID: \"e1b26f03-480c-45ab-b37e-c2971f8e117a\") " pod="openshift-marketplace/redhat-marketplace-j9mc4" Dec 11 08:20:12 crc kubenswrapper[4881]: I1211 08:20:12.986179 4881 generic.go:334] "Generic (PLEG): container finished" podID="cf59f823-b688-420e-9e5b-20f4441c9635" containerID="10e8ddc36165caabb22f5f175b1bd4a8b0f3295773054c008d4965a4f6a1ae01" exitCode=0 Dec 11 08:20:12 crc kubenswrapper[4881]: I1211 08:20:12.986223 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zqwb5" event={"ID":"cf59f823-b688-420e-9e5b-20f4441c9635","Type":"ContainerDied","Data":"10e8ddc36165caabb22f5f175b1bd4a8b0f3295773054c008d4965a4f6a1ae01"} Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.079373 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j9mc4" Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.501474 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j9mc4"] Dec 11 08:20:13 crc kubenswrapper[4881]: W1211 08:20:13.510398 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode1b26f03_480c_45ab_b37e_c2971f8e117a.slice/crio-04d88974f52bb6ee170ba6e83150efd23ce9c5430fd0bff327da8b0c27cf23b8 WatchSource:0}: Error finding container 04d88974f52bb6ee170ba6e83150efd23ce9c5430fd0bff327da8b0c27cf23b8: Status 404 returned error can't find the container with id 04d88974f52bb6ee170ba6e83150efd23ce9c5430fd0bff327da8b0c27cf23b8 Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.754738 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-kfk9x"] Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.756132 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kfk9x" Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.757642 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kfk9x"] Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.759960 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.818010 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xm9b4\" (UniqueName: \"kubernetes.io/projected/23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f-kube-api-access-xm9b4\") pod \"redhat-operators-kfk9x\" (UID: \"23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f\") " pod="openshift-marketplace/redhat-operators-kfk9x" Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.818600 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f-catalog-content\") pod \"redhat-operators-kfk9x\" (UID: \"23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f\") " pod="openshift-marketplace/redhat-operators-kfk9x" Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.818716 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f-utilities\") pod \"redhat-operators-kfk9x\" (UID: \"23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f\") " pod="openshift-marketplace/redhat-operators-kfk9x" Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.920320 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xm9b4\" (UniqueName: \"kubernetes.io/projected/23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f-kube-api-access-xm9b4\") pod \"redhat-operators-kfk9x\" (UID: \"23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f\") " pod="openshift-marketplace/redhat-operators-kfk9x" Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.920471 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f-catalog-content\") pod \"redhat-operators-kfk9x\" (UID: \"23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f\") " pod="openshift-marketplace/redhat-operators-kfk9x" Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.920505 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f-utilities\") pod \"redhat-operators-kfk9x\" (UID: \"23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f\") " pod="openshift-marketplace/redhat-operators-kfk9x" Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.921954 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f-utilities\") pod \"redhat-operators-kfk9x\" (UID: \"23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f\") " pod="openshift-marketplace/redhat-operators-kfk9x" Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.922004 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f-catalog-content\") pod \"redhat-operators-kfk9x\" (UID: \"23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f\") " 
pod="openshift-marketplace/redhat-operators-kfk9x" Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.941122 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xm9b4\" (UniqueName: \"kubernetes.io/projected/23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f-kube-api-access-xm9b4\") pod \"redhat-operators-kfk9x\" (UID: \"23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f\") " pod="openshift-marketplace/redhat-operators-kfk9x" Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.997308 4881 generic.go:334] "Generic (PLEG): container finished" podID="e1b26f03-480c-45ab-b37e-c2971f8e117a" containerID="a510c1635f03a1a38db8822ace1e2f781dbe1b54d814c5c739bc9748655c63ca" exitCode=0 Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.997415 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j9mc4" event={"ID":"e1b26f03-480c-45ab-b37e-c2971f8e117a","Type":"ContainerDied","Data":"a510c1635f03a1a38db8822ace1e2f781dbe1b54d814c5c739bc9748655c63ca"} Dec 11 08:20:13 crc kubenswrapper[4881]: I1211 08:20:13.997446 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j9mc4" event={"ID":"e1b26f03-480c-45ab-b37e-c2971f8e117a","Type":"ContainerStarted","Data":"04d88974f52bb6ee170ba6e83150efd23ce9c5430fd0bff327da8b0c27cf23b8"} Dec 11 08:20:14 crc kubenswrapper[4881]: I1211 08:20:14.096055 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kfk9x" Dec 11 08:20:14 crc kubenswrapper[4881]: I1211 08:20:14.257575 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kfk9x"] Dec 11 08:20:14 crc kubenswrapper[4881]: W1211 08:20:14.265283 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod23d4f6b6_7fa3_4c1b_afc8_ce176ef3b69f.slice/crio-2385874a1b7839734c14e7fd2394d4546b76be99bb8a8df0962d248a57ed4f4c WatchSource:0}: Error finding container 2385874a1b7839734c14e7fd2394d4546b76be99bb8a8df0962d248a57ed4f4c: Status 404 returned error can't find the container with id 2385874a1b7839734c14e7fd2394d4546b76be99bb8a8df0962d248a57ed4f4c Dec 11 08:20:15 crc kubenswrapper[4881]: I1211 08:20:15.003556 4881 generic.go:334] "Generic (PLEG): container finished" podID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" containerID="a3aa4f6942906646ca2b18da855ed09cd14037edbc81666525a946420a760814" exitCode=0 Dec 11 08:20:15 crc kubenswrapper[4881]: I1211 08:20:15.003649 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kfk9x" event={"ID":"23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f","Type":"ContainerDied","Data":"a3aa4f6942906646ca2b18da855ed09cd14037edbc81666525a946420a760814"} Dec 11 08:20:15 crc kubenswrapper[4881]: I1211 08:20:15.003935 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kfk9x" event={"ID":"23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f","Type":"ContainerStarted","Data":"2385874a1b7839734c14e7fd2394d4546b76be99bb8a8df0962d248a57ed4f4c"} Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.022817 4881 generic.go:334] "Generic (PLEG): container finished" podID="99c7c976-c996-4b9c-a389-fbaed3f71813" containerID="0016d202d2ad187d252c55619cf31b1d41227d846eabdddcb2f567c6c4fc1195" exitCode=0 Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.022919 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-74d77" event={"ID":"99c7c976-c996-4b9c-a389-fbaed3f71813","Type":"ContainerDied","Data":"0016d202d2ad187d252c55619cf31b1d41227d846eabdddcb2f567c6c4fc1195"} Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.750505 4881 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.751148 4881 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.751443 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9" gracePeriod=15 Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.751613 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.751991 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487" gracePeriod=15 Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.752052 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb" gracePeriod=15 Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.752092 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3" gracePeriod=15 Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.752146 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669" gracePeriod=15 Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.753541 4881 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 11 08:20:18 crc kubenswrapper[4881]: E1211 08:20:18.753761 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.753778 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 11 08:20:18 crc kubenswrapper[4881]: E1211 08:20:18.753787 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.753793 4881 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 11 08:20:18 crc kubenswrapper[4881]: E1211 08:20:18.753805 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.753811 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 11 08:20:18 crc kubenswrapper[4881]: E1211 08:20:18.753819 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.753825 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Dec 11 08:20:18 crc kubenswrapper[4881]: E1211 08:20:18.753833 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.753840 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 11 08:20:18 crc kubenswrapper[4881]: E1211 08:20:18.753850 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.753857 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 11 08:20:18 crc kubenswrapper[4881]: E1211 08:20:18.753871 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.753877 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.753964 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.753973 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.753981 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.753990 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.753998 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.754006 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.792038 4881 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.797357 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.797421 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.797462 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.797479 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.797504 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.797604 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.797668 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.797695 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.898759 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.898808 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.898829 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.898848 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.898885 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.898897 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.898924 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.898946 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.898917 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.898982 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") 
pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.899039 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.899050 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.899157 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.899216 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.899384 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:18 crc kubenswrapper[4881]: I1211 08:20:18.899421 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:20:19 crc kubenswrapper[4881]: E1211 08:20:19.057534 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:20:19Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:20:19Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:20:19Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:20:19Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:34f522750c260aee8d7d3d8c16bba58727f5dfb964b4aecc8b09e3e6f7056f12\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:9acec1ab208005d77c0ac2722e15bf8620aff3b5c4ab7910d45b05a66d2bb912\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1628955991},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:178c13b6a1b34d5a4da4710d46305ff33fc30a390d065c0e2ba191c863238f9e\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:fcefccd5340edefa42f3ec04805e7514cbd84b40e2ad4f0542e25acb4897c5a4\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1232534877},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:b0004ff683181b2b57df13c0ffc42453e10a5dcb1789d938a3f18527b08412d6\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d68762606abc1a4575916f8aec19a1d1c4e07b5c88745bc46602ddbd3b20496c\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1202271579},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:be25e28aabd5a6e06b4df55e58fa4be426c96c57e3387969e0070e6058149d04\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:e6f1bca5d60a93ec9f9bd8ae305cd4ded3f62b2a51bbfdf59e056ea57c0c5b9f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1154573130},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\
\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for 
node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:19 crc kubenswrapper[4881]: E1211 08:20:19.058327 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:19 crc kubenswrapper[4881]: E1211 08:20:19.058737 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:19 crc kubenswrapper[4881]: E1211 08:20:19.058960 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:19 crc kubenswrapper[4881]: E1211 08:20:19.059201 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:19 crc kubenswrapper[4881]: E1211 08:20:19.059216 4881 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 08:20:19 crc kubenswrapper[4881]: I1211 08:20:19.089907 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:20:19 crc kubenswrapper[4881]: W1211 08:20:19.106329 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-8d2f76f5d8b711cf88236417a2fd3de554d4e521d25e4a61025ec342036902a8 WatchSource:0}: Error finding container 8d2f76f5d8b711cf88236417a2fd3de554d4e521d25e4a61025ec342036902a8: Status 404 returned error can't find the container with id 8d2f76f5d8b711cf88236417a2fd3de554d4e521d25e4a61025ec342036902a8 Dec 11 08:20:19 crc kubenswrapper[4881]: E1211 08:20:19.108981 4881 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.20:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.18801b6f60ca286e openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 08:20:19.108415598 +0000 UTC m=+267.485784325,LastTimestamp:2025-12-11 08:20:19.108415598 +0000 UTC m=+267.485784325,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 11 08:20:20 crc kubenswrapper[4881]: I1211 08:20:20.034799 4881 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"8d2f76f5d8b711cf88236417a2fd3de554d4e521d25e4a61025ec342036902a8"} Dec 11 08:20:20 crc kubenswrapper[4881]: I1211 08:20:20.037299 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 11 08:20:20 crc kubenswrapper[4881]: I1211 08:20:20.038554 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 11 08:20:20 crc kubenswrapper[4881]: I1211 08:20:20.039225 4881 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669" exitCode=2 Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.048908 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"78004bcb639503927632b34934fb8fa69ba836368d76b4c31a4e7bbf9c48d704"} Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.050314 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.052826 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kfk9x" event={"ID":"23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f","Type":"ContainerStarted","Data":"dbfdc88294584a315d799efe78857835f758b1755ae6a99a18013ca76423fc9f"} Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.053946 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.055622 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.063948 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.067269 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.068763 4881 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" 
containerID="204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487" exitCode=0 Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.068798 4881 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb" exitCode=0 Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.068805 4881 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3" exitCode=0 Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.068883 4881 scope.go:117] "RemoveContainer" containerID="ff7ad52ec7f42bde82d0f5b9a6ef051a56c37004bb4172dbc504afde1e5f69bf" Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.071551 4881 generic.go:334] "Generic (PLEG): container finished" podID="e1b26f03-480c-45ab-b37e-c2971f8e117a" containerID="2d401919cc8fafe650746a8878da3b4e3b4cad313dec0c4ab3ec8a8779cb532b" exitCode=0 Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.071607 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j9mc4" event={"ID":"e1b26f03-480c-45ab-b37e-c2971f8e117a","Type":"ContainerDied","Data":"2d401919cc8fafe650746a8878da3b4e3b4cad313dec0c4ab3ec8a8779cb532b"} Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.077453 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.077666 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.078723 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.087427 4881 generic.go:334] "Generic (PLEG): container finished" podID="cf59f823-b688-420e-9e5b-20f4441c9635" containerID="1df2724fa0b6a6da73cbc0003b0e611442485a47a448a17b0619b54bc573c5b9" exitCode=0 Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.087470 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zqwb5" event={"ID":"cf59f823-b688-420e-9e5b-20f4441c9635","Type":"ContainerDied","Data":"1df2724fa0b6a6da73cbc0003b0e611442485a47a448a17b0619b54bc573c5b9"} Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.089911 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:21 crc 
kubenswrapper[4881]: I1211 08:20:21.091166 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.093535 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:21 crc kubenswrapper[4881]: I1211 08:20:21.095483 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:22 crc kubenswrapper[4881]: I1211 08:20:22.095783 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-74d77" event={"ID":"99c7c976-c996-4b9c-a389-fbaed3f71813","Type":"ContainerStarted","Data":"a89270ffab1342367f2a43c0ce2a03e3b2cfb5489d5a9906587aba0c29c2d4cb"} Dec 11 08:20:22 crc kubenswrapper[4881]: I1211 08:20:22.098113 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:22 crc kubenswrapper[4881]: I1211 08:20:22.098271 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kfk9x" event={"ID":"23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f","Type":"ContainerDied","Data":"dbfdc88294584a315d799efe78857835f758b1755ae6a99a18013ca76423fc9f"} Dec 11 08:20:22 crc kubenswrapper[4881]: I1211 08:20:22.098220 4881 generic.go:334] "Generic (PLEG): container finished" podID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" containerID="dbfdc88294584a315d799efe78857835f758b1755ae6a99a18013ca76423fc9f" exitCode=0 Dec 11 08:20:22 crc kubenswrapper[4881]: I1211 08:20:22.098762 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:22 crc kubenswrapper[4881]: I1211 08:20:22.099093 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:22 crc kubenswrapper[4881]: I1211 08:20:22.099422 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:22 crc kubenswrapper[4881]: I1211 08:20:22.100217 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:22 crc kubenswrapper[4881]: I1211 08:20:22.100660 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:22 crc kubenswrapper[4881]: I1211 08:20:22.101038 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:22 crc kubenswrapper[4881]: I1211 08:20:22.101274 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:22 crc kubenswrapper[4881]: I1211 08:20:22.101718 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:22 crc kubenswrapper[4881]: I1211 08:20:22.102072 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:22 crc kubenswrapper[4881]: I1211 08:20:22.102452 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 11 08:20:23 crc kubenswrapper[4881]: I1211 08:20:23.009918 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:23 crc kubenswrapper[4881]: I1211 08:20:23.010764 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": 
dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:23 crc kubenswrapper[4881]: I1211 08:20:23.011005 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:23 crc kubenswrapper[4881]: I1211 08:20:23.011270 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:23 crc kubenswrapper[4881]: I1211 08:20:23.011625 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:23 crc kubenswrapper[4881]: E1211 08:20:23.626701 4881 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.20:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.18801b6f60ca286e openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 08:20:19.108415598 +0000 UTC m=+267.485784325,LastTimestamp:2025-12-11 08:20:19.108415598 +0000 UTC m=+267.485784325,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 11 08:20:24 crc kubenswrapper[4881]: I1211 08:20:24.115729 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 11 08:20:24 crc kubenswrapper[4881]: I1211 08:20:24.116725 4881 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9" exitCode=0 Dec 11 08:20:27 crc kubenswrapper[4881]: I1211 08:20:27.005153 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:20:27 crc kubenswrapper[4881]: I1211 08:20:27.005576 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:20:27 crc kubenswrapper[4881]: W1211 08:20:27.006267 4881 reflector.go:561] object-"openshift-network-console"/"networking-console-plugin": failed to list *v1.ConfigMap: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-console/configmaps?fieldSelector=metadata.name%3Dnetworking-console-plugin&resourceVersion=27189": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:20:27 crc kubenswrapper[4881]: E1211 08:20:27.006451 4881 reflector.go:158] "Unhandled Error" err="object-\"openshift-network-console\"/\"networking-console-plugin\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-console/configmaps?fieldSelector=metadata.name%3Dnetworking-console-plugin&resourceVersion=27189\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:20:27 crc kubenswrapper[4881]: W1211 08:20:27.006613 4881 reflector.go:561] object-"openshift-network-console"/"networking-console-plugin-cert": failed to list *v1.Secret: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-console/secrets?fieldSelector=metadata.name%3Dnetworking-console-plugin-cert&resourceVersion=27189": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:20:27 crc kubenswrapper[4881]: E1211 08:20:27.006707 4881 reflector.go:158] "Unhandled Error" err="object-\"openshift-network-console\"/\"networking-console-plugin-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-console/secrets?fieldSelector=metadata.name%3Dnetworking-console-plugin-cert&resourceVersion=27189\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:20:27 crc kubenswrapper[4881]: I1211 08:20:27.835108 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" containerName="oauth-openshift" containerID="cri-o://2529e00c18dd5a4e1e02de975a5654ba6325ce7db6af218f77c0fb1a3f457634" gracePeriod=15 Dec 11 08:20:28 crc kubenswrapper[4881]: E1211 08:20:28.006255 4881 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: failed to sync configmap cache: timed out waiting for the condition Dec 11 08:20:28 crc kubenswrapper[4881]: E1211 08:20:28.006381 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:22:30.006327103 +0000 UTC m=+398.383695800 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : failed to sync configmap cache: timed out waiting for the condition Dec 11 08:20:28 crc kubenswrapper[4881]: E1211 08:20:28.006419 4881 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: failed to sync secret cache: timed out waiting for the condition Dec 11 08:20:28 crc kubenswrapper[4881]: E1211 08:20:28.006537 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 08:22:30.006507268 +0000 UTC m=+398.383876035 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : failed to sync secret cache: timed out waiting for the condition Dec 11 08:20:28 crc kubenswrapper[4881]: E1211 08:20:28.044860 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[networking-console-plugin-cert nginx-conf], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 08:20:28 crc kubenswrapper[4881]: I1211 08:20:28.610062 4881 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-79b8l container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.17:6443/healthz\": dial tcp 10.217.0.17:6443: connect: connection refused" start-of-body= Dec 11 08:20:28 crc kubenswrapper[4881]: I1211 08:20:28.610212 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.17:6443/healthz\": dial tcp 10.217.0.17:6443: connect: connection refused" Dec 11 08:20:28 crc kubenswrapper[4881]: E1211 08:20:28.885862 4881 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:28 crc kubenswrapper[4881]: E1211 08:20:28.886533 4881 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:28 crc kubenswrapper[4881]: E1211 08:20:28.886801 4881 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:28 crc kubenswrapper[4881]: E1211 08:20:28.886955 4881 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: 
connection refused" Dec 11 08:20:28 crc kubenswrapper[4881]: E1211 08:20:28.887259 4881 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:28 crc kubenswrapper[4881]: I1211 08:20:28.887437 4881 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Dec 11 08:20:28 crc kubenswrapper[4881]: E1211 08:20:28.888102 4881 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="200ms" Dec 11 08:20:29 crc kubenswrapper[4881]: W1211 08:20:29.066048 4881 reflector.go:561] object-"openshift-network-console"/"networking-console-plugin-cert": failed to list *v1.Secret: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-console/secrets?fieldSelector=metadata.name%3Dnetworking-console-plugin-cert&resourceVersion=27189": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:20:29 crc kubenswrapper[4881]: E1211 08:20:29.066164 4881 reflector.go:158] "Unhandled Error" err="object-\"openshift-network-console\"/\"networking-console-plugin-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-console/secrets?fieldSelector=metadata.name%3Dnetworking-console-plugin-cert&resourceVersion=27189\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:20:29 crc kubenswrapper[4881]: E1211 08:20:29.088791 4881 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="400ms" Dec 11 08:20:29 crc kubenswrapper[4881]: E1211 08:20:29.343539 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:20:29Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:20:29Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:20:29Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:20:29Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:34f522750c260aee8d7d3d8c16bba58727f5dfb964b4aecc8b09e3e6f7056f12\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:9acec1ab208005d77c0ac2722e15bf8620aff3b5c4ab7910d45b05a66d2bb912\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1628955991},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:178c13b6a1b34d5a4da4710d46305ff33fc30a390d065c0e2ba191c863238f9e\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:fcefccd5340edefa42f3ec04805e7514cbd84b40e2ad4f0542e25acb4897c5a4\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1232534877},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:b0004ff683181b2b57df13c0ffc42453e10a5dcb1789d938a3f18527b08412d6\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d68762606abc1a4575916f8aec19a1d1c4e07b5c88745bc46602ddbd3b20496c\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1202271579},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:be25e28aabd5a6e06b4df55e58fa4be426c96c57e3387969e0070e6058149d04\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:e6f1bca5d60a93ec9f9bd8ae305cd4ded3f62b2a51bbfdf59e056ea57c0c5b9f\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1154573130},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\
\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:29 crc kubenswrapper[4881]: E1211 08:20:29.344681 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get 
\"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:29 crc kubenswrapper[4881]: E1211 08:20:29.345143 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:29 crc kubenswrapper[4881]: E1211 08:20:29.345557 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:29 crc kubenswrapper[4881]: E1211 08:20:29.345938 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:29 crc kubenswrapper[4881]: E1211 08:20:29.345966 4881 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 08:20:29 crc kubenswrapper[4881]: W1211 08:20:29.365667 4881 reflector.go:561] object-"openshift-network-console"/"networking-console-plugin": failed to list *v1.ConfigMap: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-console/configmaps?fieldSelector=metadata.name%3Dnetworking-console-plugin&resourceVersion=27189": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:20:29 crc kubenswrapper[4881]: E1211 08:20:29.365762 4881 reflector.go:158] "Unhandled Error" err="object-\"openshift-network-console\"/\"networking-console-plugin\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-console/configmaps?fieldSelector=metadata.name%3Dnetworking-console-plugin&resourceVersion=27189\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:20:29 crc kubenswrapper[4881]: E1211 08:20:29.489674 4881 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="800ms" Dec 11 08:20:30 crc kubenswrapper[4881]: E1211 08:20:30.291009 4881 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="1.6s" Dec 11 08:20:30 crc kubenswrapper[4881]: I1211 08:20:30.667418 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-74d77" Dec 11 08:20:30 crc kubenswrapper[4881]: I1211 08:20:30.667493 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-74d77" Dec 11 08:20:30 crc kubenswrapper[4881]: I1211 08:20:30.718500 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-74d77" Dec 11 08:20:30 crc kubenswrapper[4881]: I1211 08:20:30.719277 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:30 crc kubenswrapper[4881]: I1211 08:20:30.719938 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:30 crc kubenswrapper[4881]: I1211 08:20:30.720407 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:30 crc kubenswrapper[4881]: I1211 08:20:30.720841 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:30 crc kubenswrapper[4881]: I1211 08:20:30.721222 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:31 crc kubenswrapper[4881]: I1211 08:20:31.200534 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-74d77" Dec 11 08:20:31 crc kubenswrapper[4881]: I1211 08:20:31.201232 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:31 crc kubenswrapper[4881]: I1211 08:20:31.201756 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:31 crc kubenswrapper[4881]: I1211 08:20:31.202242 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:31 crc kubenswrapper[4881]: I1211 08:20:31.202720 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection 
refused" Dec 11 08:20:31 crc kubenswrapper[4881]: I1211 08:20:31.203327 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: E1211 08:20:31.892660 4881 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="3.2s" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:33.008971 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:33.009592 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:33.009812 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:33.010147 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:33.010418 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: E1211 08:20:33.628175 4881 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.20:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.18801b6f60ca286e openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image 
\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 08:20:19.108415598 +0000 UTC m=+267.485784325,LastTimestamp:2025-12-11 08:20:19.108415598 +0000 UTC m=+267.485784325,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 11 08:20:36 crc kubenswrapper[4881]: W1211 08:20:34.906945 4881 reflector.go:561] object-"openshift-network-console"/"networking-console-plugin-cert": failed to list *v1.Secret: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-console/secrets?fieldSelector=metadata.name%3Dnetworking-console-plugin-cert&resourceVersion=27189": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:20:36 crc kubenswrapper[4881]: E1211 08:20:34.907027 4881 reflector.go:158] "Unhandled Error" err="object-\"openshift-network-console\"/\"networking-console-plugin-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-console/secrets?fieldSelector=metadata.name%3Dnetworking-console-plugin-cert&resourceVersion=27189\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:20:36 crc kubenswrapper[4881]: E1211 08:20:35.093720 4881 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="6.4s" Dec 11 08:20:36 crc kubenswrapper[4881]: W1211 08:20:35.227447 4881 reflector.go:561] object-"openshift-network-console"/"networking-console-plugin": failed to list *v1.ConfigMap: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-console/configmaps?fieldSelector=metadata.name%3Dnetworking-console-plugin&resourceVersion=27189": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:20:36 crc kubenswrapper[4881]: E1211 08:20:35.227805 4881 reflector.go:158] "Unhandled Error" err="object-\"openshift-network-console\"/\"networking-console-plugin\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-console/configmaps?fieldSelector=metadata.name%3Dnetworking-console-plugin&resourceVersion=27189\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:20:36 crc kubenswrapper[4881]: E1211 08:20:35.747849 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-conmon-3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262.scope\": RecentStats: unable to find data in memory cache]" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:35.901653 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-558db77b4-79b8l_fbc88e77-4757-426f-9212-8e4c3d26b8e0/oauth-openshift/0.log" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:35.901692 4881 generic.go:334] "Generic (PLEG): container finished" podID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" containerID="2529e00c18dd5a4e1e02de975a5654ba6325ce7db6af218f77c0fb1a3f457634" 
exitCode=-1 Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:35.902286 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" event={"ID":"fbc88e77-4757-426f-9212-8e4c3d26b8e0","Type":"ContainerDied","Data":"2529e00c18dd5a4e1e02de975a5654ba6325ce7db6af218f77c0fb1a3f457634"} Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.024492 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.025420 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.025928 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.026608 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.027411 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.027805 4881 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.028078 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.028452 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.123157 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 
08:20:36.123267 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.123621 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.123363 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.123436 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.123749 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.123870 4881 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.123886 4881 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.123897 4881 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.915410 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" event={"ID":"fbc88e77-4757-426f-9212-8e4c3d26b8e0","Type":"ContainerDied","Data":"a0098c4caccf1456e8cec9078f0f3ac67e557b45383e99b4108ae068dc4f527c"} Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.915472 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a0098c4caccf1456e8cec9078f0f3ac67e557b45383e99b4108ae068dc4f527c" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.919227 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.919959 4881 scope.go:117] "RemoveContainer" containerID="204b876ced5657df64e1a6074ba7ddbb3e286df0e12630d709ff64d2905b8487" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.920038 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.932766 4881 generic.go:334] "Generic (PLEG): container finished" podID="eb7312c9-9c92-429a-8c3a-d86d8196564d" containerID="1b96acdc1951244f1de31b564e9731ad7eccd3629f5a5bf859f2715d38e919be" exitCode=0 Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.932853 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"eb7312c9-9c92-429a-8c3a-d86d8196564d","Type":"ContainerDied","Data":"1b96acdc1951244f1de31b564e9731ad7eccd3629f5a5bf859f2715d38e919be"} Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.933474 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.933804 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.934023 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.934361 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.934684 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.935139 4881 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.935537 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.935576 4881 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262" exitCode=1 Dec 11 08:20:36 crc 
kubenswrapper[4881]: I1211 08:20:36.935602 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262"} Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.935677 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.936041 4881 scope.go:117] "RemoveContainer" containerID="3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.936034 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.936920 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.937208 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.937487 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.937747 4881 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.937982 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.938236 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.938513 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:36 crc kubenswrapper[4881]: I1211 08:20:36.973391 4881 scope.go:117] "RemoveContainer" containerID="6735b5cd08055deeae614855e5de8a056b276c8006a16c45f2a0fce97da172cb" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.012901 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.013356 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.013579 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.014012 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.014799 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.015491 4881 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.016090 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc 
kubenswrapper[4881]: I1211 08:20:37.016564 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.017912 4881 scope.go:117] "RemoveContainer" containerID="daf982782f40db65e6a33e1e1bcfd01493d5a385c52013546d38d921ac4dcad3" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.018264 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.042128 4881 scope.go:117] "RemoveContainer" containerID="3ceeda38e245a549205af6a7f2468c682ca1420574f2d3b375dfba7d93a3c669" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.062018 4881 scope.go:117] "RemoveContainer" containerID="f4bc17ebdc46bdf12b24515e7a055043646bcea98fe47ea7f5c44e679d02bde9" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.079871 4881 scope.go:117] "RemoveContainer" containerID="70fb9b6123864b3652eabbb833b18032659a0c038f4e5857e7363a3776022396" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.130640 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.137622 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.138042 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.138198 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.138525 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.139042 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.139305 4881 status_manager.go:851] "Failed to get status for pod" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" 
pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-79b8l\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.139699 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.139887 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.140036 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.249713 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fbc88e77-4757-426f-9212-8e4c3d26b8e0-audit-dir\") pod \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.249765 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-trusted-ca-bundle\") pod \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.249808 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbplp\" (UniqueName: \"kubernetes.io/projected/fbc88e77-4757-426f-9212-8e4c3d26b8e0-kube-api-access-xbplp\") pod \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.249837 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-ocp-branding-template\") pod \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.249866 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-audit-policies\") pod \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.249852 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fbc88e77-4757-426f-9212-8e4c3d26b8e0-audit-dir" 
(OuterVolumeSpecName: "audit-dir") pod "fbc88e77-4757-426f-9212-8e4c3d26b8e0" (UID: "fbc88e77-4757-426f-9212-8e4c3d26b8e0"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.250621 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "fbc88e77-4757-426f-9212-8e4c3d26b8e0" (UID: "fbc88e77-4757-426f-9212-8e4c3d26b8e0"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.250685 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-provider-selection\") pod \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.250736 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "fbc88e77-4757-426f-9212-8e4c3d26b8e0" (UID: "fbc88e77-4757-426f-9212-8e4c3d26b8e0"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.250994 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-service-ca\") pod \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.251040 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-session\") pod \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.251067 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-cliconfig\") pod \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.251082 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-login\") pod \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.251101 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-error\") pod \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.251130 4881 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-idp-0-file-data\") pod \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.251150 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-serving-cert\") pod \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.251176 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-router-certs\") pod \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\" (UID: \"fbc88e77-4757-426f-9212-8e4c3d26b8e0\") " Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.251090 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "fbc88e77-4757-426f-9212-8e4c3d26b8e0" (UID: "fbc88e77-4757-426f-9212-8e4c3d26b8e0"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.251352 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "fbc88e77-4757-426f-9212-8e4c3d26b8e0" (UID: "fbc88e77-4757-426f-9212-8e4c3d26b8e0"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.251412 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.251424 4881 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fbc88e77-4757-426f-9212-8e4c3d26b8e0-audit-dir\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.251433 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.251441 4881 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.251452 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.255879 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "fbc88e77-4757-426f-9212-8e4c3d26b8e0" (UID: "fbc88e77-4757-426f-9212-8e4c3d26b8e0"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.256264 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "fbc88e77-4757-426f-9212-8e4c3d26b8e0" (UID: "fbc88e77-4757-426f-9212-8e4c3d26b8e0"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.256763 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fbc88e77-4757-426f-9212-8e4c3d26b8e0-kube-api-access-xbplp" (OuterVolumeSpecName: "kube-api-access-xbplp") pod "fbc88e77-4757-426f-9212-8e4c3d26b8e0" (UID: "fbc88e77-4757-426f-9212-8e4c3d26b8e0"). InnerVolumeSpecName "kube-api-access-xbplp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.258435 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "fbc88e77-4757-426f-9212-8e4c3d26b8e0" (UID: "fbc88e77-4757-426f-9212-8e4c3d26b8e0"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.258795 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "fbc88e77-4757-426f-9212-8e4c3d26b8e0" (UID: "fbc88e77-4757-426f-9212-8e4c3d26b8e0"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.268708 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "fbc88e77-4757-426f-9212-8e4c3d26b8e0" (UID: "fbc88e77-4757-426f-9212-8e4c3d26b8e0"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.268852 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "fbc88e77-4757-426f-9212-8e4c3d26b8e0" (UID: "fbc88e77-4757-426f-9212-8e4c3d26b8e0"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.269691 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "fbc88e77-4757-426f-9212-8e4c3d26b8e0" (UID: "fbc88e77-4757-426f-9212-8e4c3d26b8e0"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.270195 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "fbc88e77-4757-426f-9212-8e4c3d26b8e0" (UID: "fbc88e77-4757-426f-9212-8e4c3d26b8e0"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.352507 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xbplp\" (UniqueName: \"kubernetes.io/projected/fbc88e77-4757-426f-9212-8e4c3d26b8e0-kube-api-access-xbplp\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.352558 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.352579 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.352602 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.352621 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.352640 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.352661 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.352681 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.352701 4881 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fbc88e77-4757-426f-9212-8e4c3d26b8e0-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.944552 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j9mc4" event={"ID":"e1b26f03-480c-45ab-b37e-c2971f8e117a","Type":"ContainerStarted","Data":"a8dd4839999d7dc90e77e3aef317c3eb3e0d3f4aee58bd0b3084002e9efd4c8a"} Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.945577 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.945744 4881 
status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.945887 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.946092 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.946423 4881 status_manager.go:851] "Failed to get status for pod" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-79b8l\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.946837 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.947065 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.947285 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.947658 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zqwb5" event={"ID":"cf59f823-b688-420e-9e5b-20f4441c9635","Type":"ContainerStarted","Data":"6c2c15c53f770597eb023fce51b25b81ecd613594b349f50f6f73fbdce968f02"} Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.948246 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.948421 4881 
status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.948584 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.948777 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.949009 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.949374 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.949597 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.949808 4881 status_manager.go:851] "Failed to get status for pod" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-79b8l\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.951244 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.951325 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"25a8fb859e3a3aea529df7ccaeaf72afc98991a7bf563bbf1fbb79ce6759d68d"} Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.952413 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" 
pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.952756 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.953052 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.953369 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.953550 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.953693 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.953838 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.953992 4881 status_manager.go:851] "Failed to get status for pod" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-79b8l\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.954196 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.954267 4881 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-marketplace/redhat-operators-kfk9x" event={"ID":"23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f","Type":"ContainerStarted","Data":"d155ded5678a4e46335a950f11b3041ffb71d9b1297c968541fba7a4ce767cc7"} Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.954323 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.954424 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.954698 4881 status_manager.go:851] "Failed to get status for pod" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-79b8l\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.954928 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.958700 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.959077 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.959355 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.959583 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.959855 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.960041 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.960224 4881 status_manager.go:851] "Failed to get status for pod" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-79b8l\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.960414 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.960583 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.960764 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.960942 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.961160 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.979976 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.980738 4881 status_manager.go:851] "Failed to get status for pod" 
podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.981223 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.981575 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.981906 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.982208 4881 status_manager.go:851] "Failed to get status for pod" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-79b8l\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.982513 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:37 crc kubenswrapper[4881]: I1211 08:20:37.982780 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.159928 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.160537 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.161067 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.161615 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.161853 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.162131 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.162422 4881 status_manager.go:851] "Failed to get status for pod" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-79b8l\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.162666 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.162922 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.223516 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 
08:20:38.223789 4881 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.223835 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.261207 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb7312c9-9c92-429a-8c3a-d86d8196564d-kube-api-access\") pod \"eb7312c9-9c92-429a-8c3a-d86d8196564d\" (UID: \"eb7312c9-9c92-429a-8c3a-d86d8196564d\") " Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.261270 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/eb7312c9-9c92-429a-8c3a-d86d8196564d-var-lock\") pod \"eb7312c9-9c92-429a-8c3a-d86d8196564d\" (UID: \"eb7312c9-9c92-429a-8c3a-d86d8196564d\") " Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.261357 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eb7312c9-9c92-429a-8c3a-d86d8196564d-kubelet-dir\") pod \"eb7312c9-9c92-429a-8c3a-d86d8196564d\" (UID: \"eb7312c9-9c92-429a-8c3a-d86d8196564d\") " Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.261392 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eb7312c9-9c92-429a-8c3a-d86d8196564d-var-lock" (OuterVolumeSpecName: "var-lock") pod "eb7312c9-9c92-429a-8c3a-d86d8196564d" (UID: "eb7312c9-9c92-429a-8c3a-d86d8196564d"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.261473 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eb7312c9-9c92-429a-8c3a-d86d8196564d-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "eb7312c9-9c92-429a-8c3a-d86d8196564d" (UID: "eb7312c9-9c92-429a-8c3a-d86d8196564d"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.261702 4881 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/eb7312c9-9c92-429a-8c3a-d86d8196564d-var-lock\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.261725 4881 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eb7312c9-9c92-429a-8c3a-d86d8196564d-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.270547 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb7312c9-9c92-429a-8c3a-d86d8196564d-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "eb7312c9-9c92-429a-8c3a-d86d8196564d" (UID: "eb7312c9-9c92-429a-8c3a-d86d8196564d"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.363491 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb7312c9-9c92-429a-8c3a-d86d8196564d-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.961715 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.961708 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"eb7312c9-9c92-429a-8c3a-d86d8196564d","Type":"ContainerDied","Data":"47a279f89b32d5b2b734c086078cb938e46714f9c62a33a26c3b3de4bbaa2b9b"} Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.961782 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47a279f89b32d5b2b734c086078cb938e46714f9c62a33a26c3b3de4bbaa2b9b" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.973537 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.973855 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.974069 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.974251 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.974480 4881 status_manager.go:851] "Failed to get status for pod" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-79b8l\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.974663 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.974835 4881 status_manager.go:851] "Failed 
to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:38 crc kubenswrapper[4881]: I1211 08:20:38.975000 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:39 crc kubenswrapper[4881]: E1211 08:20:39.643191 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:20:39Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:20:39Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:20:39Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T08:20:39Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:34f522750c260aee8d7d3d8c16bba58727f5dfb964b4aecc8b09e3e6f7056f12\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:9acec1ab208005d77c0ac2722e15bf8620aff3b5c4ab7910d45b05a66d2bb912\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1628955991},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:178c13b6a1b34d5a4da4710d46305ff33fc30a390d065c0e2ba191c863238f9e\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:fcefccd5340edefa42f3ec04805e7514cbd84b40e2ad4f0542e25acb4897c5a4\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1232534877},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:b0004ff683181b2b57df13c0ffc42453e10a5dcb1789d938a3f18527b08412d6\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d68762606abc1a4575916f8aec19a1d1c4e07b5c88745bc46602ddbd3b20496c\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1202271579},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:be25e28aabd5a6e06b4df55e58fa4be426c96c57e3387969e0070e6058149d04\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:e6f1bca5d60a93ec9f9bd8ae305cd4ded3f62b2a51bbfdf59e056ea57c0c5b9f\\\"
,\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1154573130},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeByte
s\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"name
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:39 crc kubenswrapper[4881]: E1211 08:20:39.644216 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:39 crc kubenswrapper[4881]: E1211 08:20:39.644464 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:39 crc kubenswrapper[4881]: E1211 08:20:39.644621 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:39 crc kubenswrapper[4881]: E1211 08:20:39.644751 4881 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:39 crc kubenswrapper[4881]: E1211 08:20:39.644763 4881 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 08:20:40 crc kubenswrapper[4881]: I1211 08:20:40.004419 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:20:41 crc kubenswrapper[4881]: E1211 08:20:41.494347 4881 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="7s" Dec 11 08:20:41 crc kubenswrapper[4881]: I1211 08:20:41.681735 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zqwb5" Dec 11 08:20:41 crc kubenswrapper[4881]: I1211 08:20:41.681782 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zqwb5" Dec 11 08:20:41 crc kubenswrapper[4881]: I1211 08:20:41.734324 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zqwb5" Dec 11 08:20:41 crc kubenswrapper[4881]: I1211 08:20:41.735007 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:41 crc kubenswrapper[4881]: I1211 08:20:41.735290 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:41 crc kubenswrapper[4881]: I1211 08:20:41.736431 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:41 crc kubenswrapper[4881]: I1211 08:20:41.736725 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:41 crc kubenswrapper[4881]: I1211 08:20:41.737089 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:41 crc kubenswrapper[4881]: I1211 08:20:41.737295 4881 status_manager.go:851] "Failed to get status for pod" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-79b8l\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:41 crc kubenswrapper[4881]: I1211 08:20:41.737469 4881 status_manager.go:851] "Failed to get status for pod" 
podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:41 crc kubenswrapper[4881]: I1211 08:20:41.737656 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.008961 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.009436 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.009984 4881 status_manager.go:851] "Failed to get status for pod" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-79b8l\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.010531 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.011990 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.012325 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.012763 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 
08:20:43.013176 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.079931 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-j9mc4" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.080031 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-j9mc4" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.146781 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-j9mc4" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.147546 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.148176 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.148580 4881 status_manager.go:851] "Failed to get status for pod" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-79b8l\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.149003 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.149435 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.149821 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.150176 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" 
pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:43 crc kubenswrapper[4881]: I1211 08:20:43.150564 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:43 crc kubenswrapper[4881]: W1211 08:20:43.529439 4881 reflector.go:561] object-"openshift-network-console"/"networking-console-plugin-cert": failed to list *v1.Secret: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-console/secrets?fieldSelector=metadata.name%3Dnetworking-console-plugin-cert&resourceVersion=27189": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:20:43 crc kubenswrapper[4881]: E1211 08:20:43.529539 4881 reflector.go:158] "Unhandled Error" err="object-\"openshift-network-console\"/\"networking-console-plugin-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-console/secrets?fieldSelector=metadata.name%3Dnetworking-console-plugin-cert&resourceVersion=27189\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:20:43 crc kubenswrapper[4881]: E1211 08:20:43.629921 4881 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.20:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.18801b6f60ca286e openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 08:20:19.108415598 +0000 UTC m=+267.485784325,LastTimestamp:2025-12-11 08:20:19.108415598 +0000 UTC m=+267.485784325,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.058727 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-j9mc4" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.059462 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.060092 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.060532 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.061126 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.061769 4881 status_manager.go:851] "Failed to get status for pod" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-79b8l\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.062077 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.062491 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.062976 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.096139 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-kfk9x" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.096376 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-kfk9x" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.158662 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-kfk9x" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.159283 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection 
refused" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.159867 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.160761 4881 status_manager.go:851] "Failed to get status for pod" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-79b8l\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.161503 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.161988 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.162556 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.163035 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:44 crc kubenswrapper[4881]: I1211 08:20:44.163550 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.004717 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.005583 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.006179 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.007129 4881 status_manager.go:851] "Failed to get status for pod" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-79b8l\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.007719 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.008214 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.008688 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.009085 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.009511 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.032702 4881 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="712c62b1-8ccd-4aa3-bcf9-678e361454ec" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 
08:20:45.033078 4881 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="712c62b1-8ccd-4aa3-bcf9-678e361454ec" Dec 11 08:20:45 crc kubenswrapper[4881]: E1211 08:20:45.033552 4881 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.033936 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.065654 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-kfk9x" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.066252 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.067247 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.067960 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.068425 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.069013 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.069590 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.070145 4881 status_manager.go:851] "Failed to get status for pod" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-79b8l\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:45 crc kubenswrapper[4881]: I1211 08:20:45.070725 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:45 crc kubenswrapper[4881]: W1211 08:20:45.073072 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-c613ee1fd23ae78cf6e487762b36162cf851cd770fe20af3e4d69565064222de WatchSource:0}: Error finding container c613ee1fd23ae78cf6e487762b36162cf851cd770fe20af3e4d69565064222de: Status 404 returned error can't find the container with id c613ee1fd23ae78cf6e487762b36162cf851cd770fe20af3e4d69565064222de Dec 11 08:20:45 crc kubenswrapper[4881]: W1211 08:20:45.623528 4881 reflector.go:561] object-"openshift-network-console"/"networking-console-plugin": failed to list *v1.ConfigMap: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-console/configmaps?fieldSelector=metadata.name%3Dnetworking-console-plugin&resourceVersion=27189": dial tcp 38.102.83.20:6443: connect: connection refused Dec 11 08:20:45 crc kubenswrapper[4881]: E1211 08:20:45.623662 4881 reflector.go:158] "Unhandled Error" err="object-\"openshift-network-console\"/\"networking-console-plugin\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-console/configmaps?fieldSelector=metadata.name%3Dnetworking-console-plugin&resourceVersion=27189\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Dec 11 08:20:46 crc kubenswrapper[4881]: I1211 08:20:46.008565 4881 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="112cdb953e1b464a9d0f2e7f0c01f61447a72e1329a6c9e0b7a1e1910153abc7" exitCode=0 Dec 11 08:20:46 crc kubenswrapper[4881]: I1211 08:20:46.008696 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"112cdb953e1b464a9d0f2e7f0c01f61447a72e1329a6c9e0b7a1e1910153abc7"} Dec 11 08:20:46 crc kubenswrapper[4881]: I1211 08:20:46.009026 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"c613ee1fd23ae78cf6e487762b36162cf851cd770fe20af3e4d69565064222de"} Dec 11 08:20:46 crc kubenswrapper[4881]: I1211 08:20:46.009721 4881 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="712c62b1-8ccd-4aa3-bcf9-678e361454ec" Dec 11 08:20:46 crc kubenswrapper[4881]: I1211 08:20:46.009763 4881 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="712c62b1-8ccd-4aa3-bcf9-678e361454ec" Dec 11 08:20:46 crc kubenswrapper[4881]: I1211 08:20:46.010319 4881 status_manager.go:851] "Failed to get status for pod" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:46 crc kubenswrapper[4881]: E1211 08:20:46.010448 4881 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:46 crc kubenswrapper[4881]: I1211 08:20:46.010902 4881 status_manager.go:851] "Failed to get status for pod" podUID="23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f" pod="openshift-marketplace/redhat-operators-kfk9x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-kfk9x\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:46 crc kubenswrapper[4881]: I1211 08:20:46.011418 4881 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:46 crc kubenswrapper[4881]: I1211 08:20:46.011878 4881 status_manager.go:851] "Failed to get status for pod" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" pod="openshift-marketplace/certified-operators-zqwb5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zqwb5\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:46 crc kubenswrapper[4881]: I1211 08:20:46.012257 4881 status_manager.go:851] "Failed to get status for pod" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" pod="openshift-authentication/oauth-openshift-558db77b4-79b8l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-79b8l\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:46 crc kubenswrapper[4881]: I1211 08:20:46.012651 4881 status_manager.go:851] "Failed to get status for pod" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" pod="openshift-marketplace/community-operators-74d77" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-74d77\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:46 crc kubenswrapper[4881]: I1211 08:20:46.013021 4881 status_manager.go:851] "Failed to get status for pod" podUID="e1b26f03-480c-45ab-b37e-c2971f8e117a" pod="openshift-marketplace/redhat-marketplace-j9mc4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j9mc4\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:46 crc kubenswrapper[4881]: I1211 08:20:46.013629 4881 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.20:6443: connect: connection refused" Dec 11 08:20:47 crc kubenswrapper[4881]: I1211 08:20:47.019833 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"0bd3781901f4bdbe213651a14979ad3b018d50a9ce180b4b4a9eddda3aa19891"} Dec 11 08:20:47 crc kubenswrapper[4881]: I1211 08:20:47.020218 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"59ff40e337f96a79e98a7dab263f3f9ae961da233153d6fbbb5a822014806000"} Dec 11 08:20:47 crc kubenswrapper[4881]: I1211 08:20:47.020234 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"20c754de23e231c698983319cd37ef8505a83e91fb7f7f5a1f06fbb572ae87fb"} Dec 11 08:20:47 crc kubenswrapper[4881]: I1211 08:20:47.020246 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2536efd102b0655a420685afa5df43690a98b0bd8b95f0c9c562625d3533d50e"} Dec 11 08:20:47 crc kubenswrapper[4881]: I1211 08:20:47.103893 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:20:48 crc kubenswrapper[4881]: I1211 08:20:48.027203 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"989e7871b4960f958eda7195492cee20539aeb0ab452f256ecc961c915031a80"} Dec 11 08:20:48 crc kubenswrapper[4881]: I1211 08:20:48.027382 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:48 crc kubenswrapper[4881]: I1211 08:20:48.027462 4881 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="712c62b1-8ccd-4aa3-bcf9-678e361454ec" Dec 11 08:20:48 crc kubenswrapper[4881]: I1211 08:20:48.027484 4881 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="712c62b1-8ccd-4aa3-bcf9-678e361454ec" Dec 11 08:20:48 crc kubenswrapper[4881]: I1211 08:20:48.223919 4881 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Dec 11 08:20:48 crc kubenswrapper[4881]: I1211 08:20:48.224019 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Dec 11 08:20:50 crc kubenswrapper[4881]: I1211 08:20:50.034780 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:50 crc kubenswrapper[4881]: I1211 08:20:50.034868 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:50 crc kubenswrapper[4881]: I1211 08:20:50.044389 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:51 
crc kubenswrapper[4881]: I1211 08:20:51.732553 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zqwb5" Dec 11 08:20:53 crc kubenswrapper[4881]: I1211 08:20:53.041598 4881 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:53 crc kubenswrapper[4881]: I1211 08:20:53.228555 4881 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="356e1ac7-f3a7-4b30-baa1-82c684c434ce" Dec 11 08:20:54 crc kubenswrapper[4881]: I1211 08:20:54.069717 4881 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="712c62b1-8ccd-4aa3-bcf9-678e361454ec" Dec 11 08:20:54 crc kubenswrapper[4881]: I1211 08:20:54.069751 4881 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="712c62b1-8ccd-4aa3-bcf9-678e361454ec" Dec 11 08:20:54 crc kubenswrapper[4881]: I1211 08:20:54.073315 4881 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="356e1ac7-f3a7-4b30-baa1-82c684c434ce" Dec 11 08:20:54 crc kubenswrapper[4881]: I1211 08:20:54.075002 4881 status_manager.go:308] "Container readiness changed before pod has synced" pod="openshift-kube-apiserver/kube-apiserver-crc" containerID="cri-o://2536efd102b0655a420685afa5df43690a98b0bd8b95f0c9c562625d3533d50e" Dec 11 08:20:54 crc kubenswrapper[4881]: I1211 08:20:54.075049 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:20:55 crc kubenswrapper[4881]: I1211 08:20:55.076851 4881 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="712c62b1-8ccd-4aa3-bcf9-678e361454ec" Dec 11 08:20:55 crc kubenswrapper[4881]: I1211 08:20:55.076896 4881 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="712c62b1-8ccd-4aa3-bcf9-678e361454ec" Dec 11 08:20:55 crc kubenswrapper[4881]: I1211 08:20:55.083075 4881 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="356e1ac7-f3a7-4b30-baa1-82c684c434ce" Dec 11 08:20:58 crc kubenswrapper[4881]: I1211 08:20:58.218377 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Dec 11 08:20:58 crc kubenswrapper[4881]: I1211 08:20:58.223096 4881 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Dec 11 08:20:58 crc kubenswrapper[4881]: I1211 08:20:58.223158 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Dec 11 08:20:58 crc kubenswrapper[4881]: I1211 08:20:58.223211 
4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:20:58 crc kubenswrapper[4881]: I1211 08:20:58.223868 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"25a8fb859e3a3aea529df7ccaeaf72afc98991a7bf563bbf1fbb79ce6759d68d"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Dec 11 08:20:58 crc kubenswrapper[4881]: I1211 08:20:58.223995 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://25a8fb859e3a3aea529df7ccaeaf72afc98991a7bf563bbf1fbb79ce6759d68d" gracePeriod=30 Dec 11 08:21:02 crc kubenswrapper[4881]: I1211 08:21:02.646406 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Dec 11 08:21:18 crc kubenswrapper[4881]: I1211 08:21:18.240710 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Dec 11 08:21:20 crc kubenswrapper[4881]: I1211 08:21:20.023612 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Dec 11 08:21:20 crc kubenswrapper[4881]: I1211 08:21:20.399512 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Dec 11 08:21:20 crc kubenswrapper[4881]: I1211 08:21:20.440444 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Dec 11 08:21:20 crc kubenswrapper[4881]: I1211 08:21:20.591241 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Dec 11 08:21:21 crc kubenswrapper[4881]: I1211 08:21:21.099748 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Dec 11 08:21:21 crc kubenswrapper[4881]: I1211 08:21:21.436598 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Dec 11 08:21:21 crc kubenswrapper[4881]: I1211 08:21:21.570542 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Dec 11 08:21:21 crc kubenswrapper[4881]: I1211 08:21:21.760157 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Dec 11 08:21:21 crc kubenswrapper[4881]: I1211 08:21:21.979332 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Dec 11 08:21:22 crc kubenswrapper[4881]: I1211 08:21:22.130230 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Dec 11 08:21:22 crc kubenswrapper[4881]: I1211 08:21:22.166473 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Dec 11 08:21:23 crc kubenswrapper[4881]: I1211 08:21:23.004666 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" 
Dec 11 08:21:23 crc kubenswrapper[4881]: I1211 08:21:23.194762 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Dec 11 08:21:23 crc kubenswrapper[4881]: I1211 08:21:23.458990 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Dec 11 08:21:23 crc kubenswrapper[4881]: I1211 08:21:23.472965 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Dec 11 08:21:23 crc kubenswrapper[4881]: I1211 08:21:23.919836 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Dec 11 08:21:24 crc kubenswrapper[4881]: I1211 08:21:24.023064 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Dec 11 08:21:24 crc kubenswrapper[4881]: I1211 08:21:24.436635 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Dec 11 08:21:24 crc kubenswrapper[4881]: I1211 08:21:24.669308 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Dec 11 08:21:25 crc kubenswrapper[4881]: I1211 08:21:25.238808 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Dec 11 08:21:25 crc kubenswrapper[4881]: I1211 08:21:25.392057 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Dec 11 08:21:25 crc kubenswrapper[4881]: I1211 08:21:25.423781 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Dec 11 08:21:25 crc kubenswrapper[4881]: I1211 08:21:25.518464 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 11 08:21:25 crc kubenswrapper[4881]: I1211 08:21:25.565445 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Dec 11 08:21:25 crc kubenswrapper[4881]: I1211 08:21:25.811203 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Dec 11 08:21:26 crc kubenswrapper[4881]: I1211 08:21:26.068884 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Dec 11 08:21:26 crc kubenswrapper[4881]: I1211 08:21:26.170617 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Dec 11 08:21:26 crc kubenswrapper[4881]: I1211 08:21:26.308371 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Dec 11 08:21:26 crc kubenswrapper[4881]: I1211 08:21:26.476030 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 11 08:21:26 crc kubenswrapper[4881]: I1211 08:21:26.644497 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Dec 11 08:21:26 crc kubenswrapper[4881]: I1211 08:21:26.734824 4881 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Dec 11 08:21:26 crc kubenswrapper[4881]: I1211 08:21:26.806154 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 11 08:21:26 crc kubenswrapper[4881]: I1211 08:21:26.882119 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Dec 11 08:21:26 crc kubenswrapper[4881]: I1211 08:21:26.935549 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Dec 11 08:21:27 crc kubenswrapper[4881]: I1211 08:21:27.063760 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Dec 11 08:21:27 crc kubenswrapper[4881]: I1211 08:21:27.070762 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 11 08:21:27 crc kubenswrapper[4881]: I1211 08:21:27.107113 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 11 08:21:27 crc kubenswrapper[4881]: I1211 08:21:27.342009 4881 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Dec 11 08:21:27 crc kubenswrapper[4881]: I1211 08:21:27.457142 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Dec 11 08:21:27 crc kubenswrapper[4881]: I1211 08:21:27.618876 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Dec 11 08:21:27 crc kubenswrapper[4881]: I1211 08:21:27.733661 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Dec 11 08:21:27 crc kubenswrapper[4881]: I1211 08:21:27.773716 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Dec 11 08:21:27 crc kubenswrapper[4881]: I1211 08:21:27.794473 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Dec 11 08:21:27 crc kubenswrapper[4881]: I1211 08:21:27.890133 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 11 08:21:27 crc kubenswrapper[4881]: I1211 08:21:27.923706 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Dec 11 08:21:27 crc kubenswrapper[4881]: I1211 08:21:27.967686 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Dec 11 08:21:28 crc kubenswrapper[4881]: I1211 08:21:28.209099 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Dec 11 08:21:28 crc kubenswrapper[4881]: I1211 08:21:28.301277 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Dec 11 08:21:28 crc kubenswrapper[4881]: I1211 08:21:28.303222 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Dec 11 08:21:28 crc kubenswrapper[4881]: I1211 08:21:28.305199 4881 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Dec 11 08:21:28 crc kubenswrapper[4881]: I1211 08:21:28.305262 4881 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="25a8fb859e3a3aea529df7ccaeaf72afc98991a7bf563bbf1fbb79ce6759d68d" exitCode=137 Dec 11 08:21:28 crc kubenswrapper[4881]: I1211 08:21:28.305376 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"25a8fb859e3a3aea529df7ccaeaf72afc98991a7bf563bbf1fbb79ce6759d68d"} Dec 11 08:21:28 crc kubenswrapper[4881]: I1211 08:21:28.305424 4881 scope.go:117] "RemoveContainer" containerID="3dec421d26a9373c52bf2fb9434cc3f3b373f409b1d86ec1effca7534acb7262" Dec 11 08:21:28 crc kubenswrapper[4881]: I1211 08:21:28.346566 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Dec 11 08:21:28 crc kubenswrapper[4881]: I1211 08:21:28.511882 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 11 08:21:28 crc kubenswrapper[4881]: I1211 08:21:28.520547 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 11 08:21:28 crc kubenswrapper[4881]: I1211 08:21:28.593366 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Dec 11 08:21:28 crc kubenswrapper[4881]: I1211 08:21:28.806434 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Dec 11 08:21:28 crc kubenswrapper[4881]: I1211 08:21:28.828290 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Dec 11 08:21:29 crc kubenswrapper[4881]: I1211 08:21:29.307524 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Dec 11 08:21:29 crc kubenswrapper[4881]: I1211 08:21:29.317715 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Dec 11 08:21:29 crc kubenswrapper[4881]: I1211 08:21:29.319611 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"e93b8dc618a3efea19172d67281ddd32f132515cee3fd53ee793bb5b64261dd7"} Dec 11 08:21:29 crc kubenswrapper[4881]: I1211 08:21:29.388443 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Dec 11 08:21:29 crc kubenswrapper[4881]: I1211 08:21:29.414455 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Dec 11 08:21:29 crc kubenswrapper[4881]: I1211 08:21:29.478004 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Dec 11 08:21:29 crc kubenswrapper[4881]: I1211 08:21:29.488695 4881 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Dec 11 08:21:29 crc kubenswrapper[4881]: I1211 08:21:29.718244 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Dec 11 08:21:29 crc kubenswrapper[4881]: I1211 08:21:29.922509 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Dec 11 08:21:30 crc kubenswrapper[4881]: I1211 08:21:30.018959 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Dec 11 08:21:30 crc kubenswrapper[4881]: I1211 08:21:30.081402 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Dec 11 08:21:30 crc kubenswrapper[4881]: I1211 08:21:30.545934 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Dec 11 08:21:30 crc kubenswrapper[4881]: I1211 08:21:30.691797 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Dec 11 08:21:30 crc kubenswrapper[4881]: I1211 08:21:30.809891 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Dec 11 08:21:30 crc kubenswrapper[4881]: I1211 08:21:30.867888 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Dec 11 08:21:30 crc kubenswrapper[4881]: I1211 08:21:30.946109 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Dec 11 08:21:31 crc kubenswrapper[4881]: I1211 08:21:31.303946 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Dec 11 08:21:31 crc kubenswrapper[4881]: I1211 08:21:31.414689 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Dec 11 08:21:31 crc kubenswrapper[4881]: I1211 08:21:31.437728 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Dec 11 08:21:31 crc kubenswrapper[4881]: I1211 08:21:31.594091 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Dec 11 08:21:31 crc kubenswrapper[4881]: I1211 08:21:31.622782 4881 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Dec 11 08:21:31 crc kubenswrapper[4881]: I1211 08:21:31.797812 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Dec 11 08:21:31 crc kubenswrapper[4881]: I1211 08:21:31.847271 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Dec 11 08:21:31 crc kubenswrapper[4881]: I1211 08:21:31.868727 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Dec 11 08:21:32 crc kubenswrapper[4881]: I1211 08:21:32.119664 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Dec 11 08:21:32 crc kubenswrapper[4881]: I1211 08:21:32.153944 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Dec 
11 08:21:32 crc kubenswrapper[4881]: I1211 08:21:32.416737 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 11 08:21:32 crc kubenswrapper[4881]: I1211 08:21:32.449652 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Dec 11 08:21:32 crc kubenswrapper[4881]: I1211 08:21:32.532824 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Dec 11 08:21:32 crc kubenswrapper[4881]: I1211 08:21:32.720365 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Dec 11 08:21:32 crc kubenswrapper[4881]: I1211 08:21:32.738854 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Dec 11 08:21:32 crc kubenswrapper[4881]: I1211 08:21:32.865448 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Dec 11 08:21:33 crc kubenswrapper[4881]: I1211 08:21:33.060797 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Dec 11 08:21:33 crc kubenswrapper[4881]: I1211 08:21:33.233240 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 11 08:21:33 crc kubenswrapper[4881]: I1211 08:21:33.404685 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Dec 11 08:21:33 crc kubenswrapper[4881]: I1211 08:21:33.413726 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Dec 11 08:21:33 crc kubenswrapper[4881]: I1211 08:21:33.683635 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Dec 11 08:21:33 crc kubenswrapper[4881]: I1211 08:21:33.798457 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Dec 11 08:21:33 crc kubenswrapper[4881]: I1211 08:21:33.874554 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Dec 11 08:21:34 crc kubenswrapper[4881]: I1211 08:21:34.229046 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Dec 11 08:21:34 crc kubenswrapper[4881]: I1211 08:21:34.405129 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Dec 11 08:21:34 crc kubenswrapper[4881]: I1211 08:21:34.475710 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Dec 11 08:21:34 crc kubenswrapper[4881]: I1211 08:21:34.482402 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Dec 11 08:21:34 crc kubenswrapper[4881]: I1211 08:21:34.490322 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Dec 11 08:21:34 crc kubenswrapper[4881]: I1211 08:21:34.687537 4881 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Dec 11 08:21:34 crc kubenswrapper[4881]: I1211 08:21:34.729910 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Dec 11 08:21:34 crc kubenswrapper[4881]: I1211 08:21:34.764267 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Dec 11 08:21:34 crc kubenswrapper[4881]: I1211 08:21:34.780628 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Dec 11 08:21:34 crc kubenswrapper[4881]: I1211 08:21:34.837945 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Dec 11 08:21:34 crc kubenswrapper[4881]: I1211 08:21:34.949810 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Dec 11 08:21:34 crc kubenswrapper[4881]: I1211 08:21:34.998937 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Dec 11 08:21:35 crc kubenswrapper[4881]: I1211 08:21:35.218726 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Dec 11 08:21:35 crc kubenswrapper[4881]: I1211 08:21:35.240730 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Dec 11 08:21:35 crc kubenswrapper[4881]: I1211 08:21:35.672402 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 11 08:21:35 crc kubenswrapper[4881]: I1211 08:21:35.699784 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Dec 11 08:21:35 crc kubenswrapper[4881]: I1211 08:21:35.764674 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Dec 11 08:21:35 crc kubenswrapper[4881]: I1211 08:21:35.787971 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Dec 11 08:21:35 crc kubenswrapper[4881]: I1211 08:21:35.930973 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Dec 11 08:21:36 crc kubenswrapper[4881]: I1211 08:21:36.088388 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Dec 11 08:21:36 crc kubenswrapper[4881]: I1211 08:21:36.422383 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Dec 11 08:21:36 crc kubenswrapper[4881]: I1211 08:21:36.439539 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Dec 11 08:21:36 crc kubenswrapper[4881]: I1211 08:21:36.517067 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Dec 11 08:21:36 crc kubenswrapper[4881]: I1211 08:21:36.519119 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Dec 11 08:21:36 crc kubenswrapper[4881]: I1211 08:21:36.623677 4881 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Dec 11 08:21:36 crc kubenswrapper[4881]: I1211 08:21:36.870358 4881 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Dec 11 08:21:36 crc kubenswrapper[4881]: I1211 08:21:36.906042 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Dec 11 08:21:37 crc kubenswrapper[4881]: I1211 08:21:37.065327 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Dec 11 08:21:37 crc kubenswrapper[4881]: I1211 08:21:37.103687 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:21:37 crc kubenswrapper[4881]: I1211 08:21:37.141556 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Dec 11 08:21:37 crc kubenswrapper[4881]: I1211 08:21:37.177274 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Dec 11 08:21:37 crc kubenswrapper[4881]: I1211 08:21:37.287540 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Dec 11 08:21:37 crc kubenswrapper[4881]: I1211 08:21:37.374924 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Dec 11 08:21:37 crc kubenswrapper[4881]: I1211 08:21:37.390020 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Dec 11 08:21:37 crc kubenswrapper[4881]: I1211 08:21:37.431899 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Dec 11 08:21:37 crc kubenswrapper[4881]: I1211 08:21:37.544264 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Dec 11 08:21:37 crc kubenswrapper[4881]: I1211 08:21:37.548538 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Dec 11 08:21:37 crc kubenswrapper[4881]: I1211 08:21:37.688612 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Dec 11 08:21:37 crc kubenswrapper[4881]: I1211 08:21:37.689768 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Dec 11 08:21:38 crc kubenswrapper[4881]: I1211 08:21:38.160443 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Dec 11 08:21:38 crc kubenswrapper[4881]: I1211 08:21:38.223181 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:21:38 crc kubenswrapper[4881]: I1211 08:21:38.234141 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:21:38 crc kubenswrapper[4881]: I1211 08:21:38.326755 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Dec 11 08:21:38 crc kubenswrapper[4881]: I1211 08:21:38.380962 4881 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 08:21:38 crc kubenswrapper[4881]: I1211 08:21:38.409290 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Dec 11 08:21:38 crc kubenswrapper[4881]: I1211 08:21:38.431637 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Dec 11 08:21:38 crc kubenswrapper[4881]: I1211 08:21:38.746309 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Dec 11 08:21:38 crc kubenswrapper[4881]: I1211 08:21:38.760470 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Dec 11 08:21:38 crc kubenswrapper[4881]: I1211 08:21:38.793635 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Dec 11 08:21:38 crc kubenswrapper[4881]: I1211 08:21:38.876191 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Dec 11 08:21:38 crc kubenswrapper[4881]: I1211 08:21:38.980470 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Dec 11 08:21:39 crc kubenswrapper[4881]: I1211 08:21:39.046693 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Dec 11 08:21:39 crc kubenswrapper[4881]: I1211 08:21:39.381194 4881 generic.go:334] "Generic (PLEG): container finished" podID="a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf" containerID="f792b65425aa8e0d6e33043da9e324807b48fa9a42a8cf1582a7fc6d618698cc" exitCode=0 Dec 11 08:21:39 crc kubenswrapper[4881]: I1211 08:21:39.381269 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" event={"ID":"a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf","Type":"ContainerDied","Data":"f792b65425aa8e0d6e33043da9e324807b48fa9a42a8cf1582a7fc6d618698cc"} Dec 11 08:21:39 crc kubenswrapper[4881]: I1211 08:21:39.382156 4881 scope.go:117] "RemoveContainer" containerID="f792b65425aa8e0d6e33043da9e324807b48fa9a42a8cf1582a7fc6d618698cc" Dec 11 08:21:39 crc kubenswrapper[4881]: I1211 08:21:39.386856 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Dec 11 08:21:39 crc kubenswrapper[4881]: I1211 08:21:39.615258 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Dec 11 08:21:39 crc kubenswrapper[4881]: I1211 08:21:39.735045 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Dec 11 08:21:39 crc kubenswrapper[4881]: I1211 08:21:39.761293 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Dec 11 08:21:39 crc kubenswrapper[4881]: I1211 08:21:39.777794 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 11 08:21:39 crc kubenswrapper[4881]: I1211 08:21:39.845860 4881 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Dec 11 08:21:39 crc kubenswrapper[4881]: I1211 08:21:39.958248 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Dec 11 08:21:39 crc kubenswrapper[4881]: I1211 08:21:39.985246 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Dec 11 08:21:40 crc kubenswrapper[4881]: I1211 08:21:40.064880 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Dec 11 08:21:40 crc kubenswrapper[4881]: I1211 08:21:40.144277 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Dec 11 08:21:40 crc kubenswrapper[4881]: I1211 08:21:40.291535 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Dec 11 08:21:40 crc kubenswrapper[4881]: I1211 08:21:40.388033 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" event={"ID":"a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf","Type":"ContainerStarted","Data":"6df3f4d90e1365dea7a0281f77701ebe2fc1650a8aa2378e806081c2493062d1"} Dec 11 08:21:40 crc kubenswrapper[4881]: I1211 08:21:40.388371 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" Dec 11 08:21:40 crc kubenswrapper[4881]: I1211 08:21:40.395941 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-tbhw6" Dec 11 08:21:40 crc kubenswrapper[4881]: I1211 08:21:40.434251 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Dec 11 08:21:40 crc kubenswrapper[4881]: I1211 08:21:40.558121 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Dec 11 08:21:40 crc kubenswrapper[4881]: I1211 08:21:40.591237 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Dec 11 08:21:40 crc kubenswrapper[4881]: I1211 08:21:40.869305 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 11 08:21:40 crc kubenswrapper[4881]: I1211 08:21:40.955401 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Dec 11 08:21:40 crc kubenswrapper[4881]: I1211 08:21:40.983561 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Dec 11 08:21:41 crc kubenswrapper[4881]: I1211 08:21:41.120175 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Dec 11 08:21:41 crc kubenswrapper[4881]: I1211 08:21:41.535281 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Dec 11 08:21:41 crc kubenswrapper[4881]: I1211 08:21:41.588327 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Dec 11 08:21:41 crc kubenswrapper[4881]: I1211 08:21:41.710166 4881 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-config-operator"/"node-bootstrapper-token" Dec 11 08:21:41 crc kubenswrapper[4881]: I1211 08:21:41.827440 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Dec 11 08:21:41 crc kubenswrapper[4881]: I1211 08:21:41.964267 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Dec 11 08:21:42 crc kubenswrapper[4881]: I1211 08:21:42.267615 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Dec 11 08:21:42 crc kubenswrapper[4881]: I1211 08:21:42.486796 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Dec 11 08:21:42 crc kubenswrapper[4881]: I1211 08:21:42.582770 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Dec 11 08:21:42 crc kubenswrapper[4881]: I1211 08:21:42.616478 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Dec 11 08:21:42 crc kubenswrapper[4881]: I1211 08:21:42.714732 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Dec 11 08:21:42 crc kubenswrapper[4881]: I1211 08:21:42.774219 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Dec 11 08:21:43 crc kubenswrapper[4881]: I1211 08:21:43.057166 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Dec 11 08:21:43 crc kubenswrapper[4881]: I1211 08:21:43.067318 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Dec 11 08:21:43 crc kubenswrapper[4881]: I1211 08:21:43.106604 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Dec 11 08:21:43 crc kubenswrapper[4881]: I1211 08:21:43.132166 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Dec 11 08:21:43 crc kubenswrapper[4881]: I1211 08:21:43.177072 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 11 08:21:43 crc kubenswrapper[4881]: I1211 08:21:43.298410 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Dec 11 08:21:43 crc kubenswrapper[4881]: I1211 08:21:43.571689 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Dec 11 08:21:43 crc kubenswrapper[4881]: I1211 08:21:43.773191 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 11 08:21:43 crc kubenswrapper[4881]: I1211 08:21:43.964939 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Dec 11 08:21:44 crc kubenswrapper[4881]: I1211 08:21:44.306054 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Dec 11 08:21:44 crc kubenswrapper[4881]: I1211 08:21:44.328936 4881 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Dec 11 08:21:44 crc kubenswrapper[4881]: I1211 08:21:44.341496 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Dec 11 08:21:44 crc kubenswrapper[4881]: I1211 08:21:44.376905 4881 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Dec 11 08:21:44 crc kubenswrapper[4881]: I1211 08:21:44.472237 4881 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Dec 11 08:21:44 crc kubenswrapper[4881]: I1211 08:21:44.472967 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-kfk9x" podStartSLOduration=71.84683026 podStartE2EDuration="1m31.472943869s" podCreationTimestamp="2025-12-11 08:20:13 +0000 UTC" firstStartedPulling="2025-12-11 08:20:17.237067588 +0000 UTC m=+265.614436295" lastFinishedPulling="2025-12-11 08:20:36.863181187 +0000 UTC m=+285.240549904" observedRunningTime="2025-12-11 08:20:53.063014308 +0000 UTC m=+301.440383005" watchObservedRunningTime="2025-12-11 08:21:44.472943869 +0000 UTC m=+352.850312576" Dec 11 08:21:44 crc kubenswrapper[4881]: I1211 08:21:44.473480 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-j9mc4" podStartSLOduration=69.596133201 podStartE2EDuration="1m32.473472452s" podCreationTimestamp="2025-12-11 08:20:12 +0000 UTC" firstStartedPulling="2025-12-11 08:20:13.999594714 +0000 UTC m=+262.376963411" lastFinishedPulling="2025-12-11 08:20:36.876933945 +0000 UTC m=+285.254302662" observedRunningTime="2025-12-11 08:20:53.189247935 +0000 UTC m=+301.566616632" watchObservedRunningTime="2025-12-11 08:21:44.473472452 +0000 UTC m=+352.850841159" Dec 11 08:21:44 crc kubenswrapper[4881]: I1211 08:21:44.480513 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zqwb5" podStartSLOduration=69.591831375 podStartE2EDuration="1m33.480482413s" podCreationTimestamp="2025-12-11 08:20:11 +0000 UTC" firstStartedPulling="2025-12-11 08:20:12.987676033 +0000 UTC m=+261.365044730" lastFinishedPulling="2025-12-11 08:20:36.876327071 +0000 UTC m=+285.253695768" observedRunningTime="2025-12-11 08:20:53.089137622 +0000 UTC m=+301.466506319" watchObservedRunningTime="2025-12-11 08:21:44.480482413 +0000 UTC m=+352.857851140" Dec 11 08:21:44 crc kubenswrapper[4881]: I1211 08:21:44.480803 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-74d77" podStartSLOduration=86.008352561 podStartE2EDuration="1m34.480789751s" podCreationTimestamp="2025-12-11 08:20:10 +0000 UTC" firstStartedPulling="2025-12-11 08:20:11.981752376 +0000 UTC m=+260.359121063" lastFinishedPulling="2025-12-11 08:20:20.454189556 +0000 UTC m=+268.831558253" observedRunningTime="2025-12-11 08:20:53.147377165 +0000 UTC m=+301.524745862" watchObservedRunningTime="2025-12-11 08:21:44.480789751 +0000 UTC m=+352.858158478" Dec 11 08:21:44 crc kubenswrapper[4881]: I1211 08:21:44.486708 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=86.486683265 podStartE2EDuration="1m26.486683265s" podCreationTimestamp="2025-12-11 08:20:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:20:53.198062586 +0000 UTC m=+301.575431283" watchObservedRunningTime="2025-12-11 08:21:44.486683265 +0000 UTC m=+352.864051992" Dec 11 08:21:44 crc kubenswrapper[4881]: I1211 08:21:44.487397 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-79b8l","openshift-kube-apiserver/kube-apiserver-crc"] Dec 11 08:21:44 crc kubenswrapper[4881]: I1211 08:21:44.487470 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 11 08:21:44 crc kubenswrapper[4881]: I1211 08:21:44.494635 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 08:21:44 crc kubenswrapper[4881]: I1211 08:21:44.515620 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=51.515600961 podStartE2EDuration="51.515600961s" podCreationTimestamp="2025-12-11 08:20:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:21:44.510452096 +0000 UTC m=+352.887820803" watchObservedRunningTime="2025-12-11 08:21:44.515600961 +0000 UTC m=+352.892969678" Dec 11 08:21:44 crc kubenswrapper[4881]: I1211 08:21:44.701709 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.014737 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" path="/var/lib/kubelet/pods/fbc88e77-4757-426f-9212-8e4c3d26b8e0/volumes" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.088209 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.135307 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.175943 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.405429 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.487072 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt"] Dec 11 08:21:45 crc kubenswrapper[4881]: E1211 08:21:45.487434 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" containerName="installer" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.487467 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" containerName="installer" Dec 11 08:21:45 crc kubenswrapper[4881]: E1211 08:21:45.487490 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" containerName="oauth-openshift" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.487502 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" containerName="oauth-openshift" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.487655 4881 
memory_manager.go:354] "RemoveStaleState removing state" podUID="eb7312c9-9c92-429a-8c3a-d86d8196564d" containerName="installer" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.487674 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="fbc88e77-4757-426f-9212-8e4c3d26b8e0" containerName="oauth-openshift" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.488245 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.490569 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.492648 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.493559 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.493994 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.494560 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.495558 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.495642 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.495910 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.499899 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.500067 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.500464 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.502372 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.508776 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.513168 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.518094 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt"] Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.523708 4881 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.523807 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.543825 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.551175 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.603351 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-serving-cert\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.603409 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.603451 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-cliconfig\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.603575 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.603634 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zphkz\" (UniqueName: \"kubernetes.io/projected/c3868292-0936-4979-bd1f-c9406decb7a8-kube-api-access-zphkz\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.603701 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.603758 4881 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.603798 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-user-template-login\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.603821 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c3868292-0936-4979-bd1f-c9406decb7a8-audit-policies\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.603848 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-user-template-error\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.603876 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c3868292-0936-4979-bd1f-c9406decb7a8-audit-dir\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.603915 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-session\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.603948 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-router-certs\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.603998 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-service-ca\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc 
kubenswrapper[4881]: I1211 08:21:45.705491 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.705611 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-user-template-login\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.705672 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c3868292-0936-4979-bd1f-c9406decb7a8-audit-policies\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.705714 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-user-template-error\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.705748 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c3868292-0936-4979-bd1f-c9406decb7a8-audit-dir\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.705789 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-session\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.705828 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-router-certs\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.705885 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-service-ca\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.705998 4881 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-serving-cert\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.706034 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.706084 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-cliconfig\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.706122 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.706156 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zphkz\" (UniqueName: \"kubernetes.io/projected/c3868292-0936-4979-bd1f-c9406decb7a8-kube-api-access-zphkz\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.706204 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.707044 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c3868292-0936-4979-bd1f-c9406decb7a8-audit-dir\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.707724 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c3868292-0936-4979-bd1f-c9406decb7a8-audit-policies\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.708123 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-service-ca\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.708179 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-cliconfig\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.708727 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.717314 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-user-template-error\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.717480 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-session\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.717504 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.717819 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.717922 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-serving-cert\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.717937 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" 
(UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-router-certs\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.719503 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-user-template-login\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.720007 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/c3868292-0936-4979-bd1f-c9406decb7a8-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.729299 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zphkz\" (UniqueName: \"kubernetes.io/projected/c3868292-0936-4979-bd1f-c9406decb7a8-kube-api-access-zphkz\") pod \"oauth-openshift-846dc6fc5d-rv7gt\" (UID: \"c3868292-0936-4979-bd1f-c9406decb7a8\") " pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.751161 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Dec 11 08:21:45 crc kubenswrapper[4881]: I1211 08:21:45.823916 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:46 crc kubenswrapper[4881]: I1211 08:21:46.029539 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Dec 11 08:21:46 crc kubenswrapper[4881]: I1211 08:21:46.053513 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Dec 11 08:21:46 crc kubenswrapper[4881]: I1211 08:21:46.091398 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Dec 11 08:21:46 crc kubenswrapper[4881]: I1211 08:21:46.236193 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Dec 11 08:21:46 crc kubenswrapper[4881]: I1211 08:21:46.265240 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt"] Dec 11 08:21:46 crc kubenswrapper[4881]: W1211 08:21:46.278492 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc3868292_0936_4979_bd1f_c9406decb7a8.slice/crio-0e8cae9047319e588b5b15a13a482c64c005d0dc19e3dc4d0682ac4336de4e98 WatchSource:0}: Error finding container 0e8cae9047319e588b5b15a13a482c64c005d0dc19e3dc4d0682ac4336de4e98: Status 404 returned error can't find the container with id 0e8cae9047319e588b5b15a13a482c64c005d0dc19e3dc4d0682ac4336de4e98 Dec 11 08:21:46 crc kubenswrapper[4881]: I1211 08:21:46.425976 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" event={"ID":"c3868292-0936-4979-bd1f-c9406decb7a8","Type":"ContainerStarted","Data":"0e8cae9047319e588b5b15a13a482c64c005d0dc19e3dc4d0682ac4336de4e98"} Dec 11 08:21:46 crc kubenswrapper[4881]: I1211 08:21:46.602376 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Dec 11 08:21:46 crc kubenswrapper[4881]: I1211 08:21:46.615992 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Dec 11 08:21:46 crc kubenswrapper[4881]: I1211 08:21:46.667053 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Dec 11 08:21:46 crc kubenswrapper[4881]: I1211 08:21:46.778734 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Dec 11 08:21:46 crc kubenswrapper[4881]: I1211 08:21:46.861313 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Dec 11 08:21:47 crc kubenswrapper[4881]: I1211 08:21:47.171081 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Dec 11 08:21:47 crc kubenswrapper[4881]: I1211 08:21:47.243140 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Dec 11 08:21:47 crc kubenswrapper[4881]: I1211 08:21:47.433377 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" event={"ID":"c3868292-0936-4979-bd1f-c9406decb7a8","Type":"ContainerStarted","Data":"0d07146cf217b1a8dc7bb941752959867ef56e33d6e739f7d70c99efd4af9d4c"} Dec 11 08:21:47 crc 
kubenswrapper[4881]: I1211 08:21:47.433779 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:47 crc kubenswrapper[4881]: I1211 08:21:47.440057 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" Dec 11 08:21:47 crc kubenswrapper[4881]: I1211 08:21:47.463614 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" podStartSLOduration=105.463587123 podStartE2EDuration="1m45.463587123s" podCreationTimestamp="2025-12-11 08:20:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:21:47.458164561 +0000 UTC m=+355.835533288" watchObservedRunningTime="2025-12-11 08:21:47.463587123 +0000 UTC m=+355.840955860" Dec 11 08:21:47 crc kubenswrapper[4881]: I1211 08:21:47.717433 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Dec 11 08:21:47 crc kubenswrapper[4881]: I1211 08:21:47.747762 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Dec 11 08:21:47 crc kubenswrapper[4881]: I1211 08:21:47.848722 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Dec 11 08:21:48 crc kubenswrapper[4881]: I1211 08:21:48.083004 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Dec 11 08:21:48 crc kubenswrapper[4881]: I1211 08:21:48.144900 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 11 08:21:48 crc kubenswrapper[4881]: I1211 08:21:48.172636 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Dec 11 08:21:48 crc kubenswrapper[4881]: I1211 08:21:48.180104 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Dec 11 08:21:48 crc kubenswrapper[4881]: I1211 08:21:48.359093 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Dec 11 08:21:48 crc kubenswrapper[4881]: I1211 08:21:48.388965 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Dec 11 08:21:48 crc kubenswrapper[4881]: I1211 08:21:48.403817 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Dec 11 08:21:48 crc kubenswrapper[4881]: I1211 08:21:48.412473 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Dec 11 08:21:48 crc kubenswrapper[4881]: I1211 08:21:48.515894 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Dec 11 08:21:48 crc kubenswrapper[4881]: I1211 08:21:48.710398 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 11 08:21:48 crc kubenswrapper[4881]: I1211 08:21:48.827188 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 11 
08:21:49 crc kubenswrapper[4881]: I1211 08:21:49.264179 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Dec 11 08:21:49 crc kubenswrapper[4881]: I1211 08:21:49.463964 4881 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 11 08:21:49 crc kubenswrapper[4881]: I1211 08:21:49.464175 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://78004bcb639503927632b34934fb8fa69ba836368d76b4c31a4e7bbf9c48d704" gracePeriod=5 Dec 11 08:21:49 crc kubenswrapper[4881]: I1211 08:21:49.508917 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Dec 11 08:21:49 crc kubenswrapper[4881]: I1211 08:21:49.746928 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Dec 11 08:21:49 crc kubenswrapper[4881]: I1211 08:21:49.868874 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Dec 11 08:21:49 crc kubenswrapper[4881]: I1211 08:21:49.974879 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Dec 11 08:21:50 crc kubenswrapper[4881]: I1211 08:21:50.135585 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Dec 11 08:21:50 crc kubenswrapper[4881]: I1211 08:21:50.146397 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Dec 11 08:21:50 crc kubenswrapper[4881]: I1211 08:21:50.238461 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Dec 11 08:21:50 crc kubenswrapper[4881]: I1211 08:21:50.407656 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Dec 11 08:21:50 crc kubenswrapper[4881]: I1211 08:21:50.505765 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Dec 11 08:21:50 crc kubenswrapper[4881]: I1211 08:21:50.812654 4881 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Dec 11 08:21:51 crc kubenswrapper[4881]: I1211 08:21:51.232832 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 11 08:21:51 crc kubenswrapper[4881]: I1211 08:21:51.584520 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Dec 11 08:21:51 crc kubenswrapper[4881]: I1211 08:21:51.805460 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 11 08:21:52 crc kubenswrapper[4881]: I1211 08:21:52.339506 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 11 08:21:52 crc kubenswrapper[4881]: I1211 08:21:52.452046 4881 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-machine-api"/"openshift-service-ca.crt" Dec 11 08:21:52 crc kubenswrapper[4881]: I1211 08:21:52.653187 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Dec 11 08:21:54 crc kubenswrapper[4881]: I1211 08:21:54.184300 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.074716 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.075167 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.236234 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.236426 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.236528 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.236574 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.236639 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.236673 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.236717 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.237159 4881 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.237185 4881 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.237394 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.237418 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.247228 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.339417 4881 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.339492 4881 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.339519 4881 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.477149 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.477213 4881 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="78004bcb639503927632b34934fb8fa69ba836368d76b4c31a4e7bbf9c48d704" exitCode=137 Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.477269 4881 scope.go:117] "RemoveContainer" containerID="78004bcb639503927632b34934fb8fa69ba836368d76b4c31a4e7bbf9c48d704" Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.477427 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.505939 4881 scope.go:117] "RemoveContainer" containerID="78004bcb639503927632b34934fb8fa69ba836368d76b4c31a4e7bbf9c48d704" Dec 11 08:21:55 crc kubenswrapper[4881]: E1211 08:21:55.506613 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78004bcb639503927632b34934fb8fa69ba836368d76b4c31a4e7bbf9c48d704\": container with ID starting with 78004bcb639503927632b34934fb8fa69ba836368d76b4c31a4e7bbf9c48d704 not found: ID does not exist" containerID="78004bcb639503927632b34934fb8fa69ba836368d76b4c31a4e7bbf9c48d704" Dec 11 08:21:55 crc kubenswrapper[4881]: I1211 08:21:55.506695 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78004bcb639503927632b34934fb8fa69ba836368d76b4c31a4e7bbf9c48d704"} err="failed to get container status \"78004bcb639503927632b34934fb8fa69ba836368d76b4c31a4e7bbf9c48d704\": rpc error: code = NotFound desc = could not find container \"78004bcb639503927632b34934fb8fa69ba836368d76b4c31a4e7bbf9c48d704\": container with ID starting with 78004bcb639503927632b34934fb8fa69ba836368d76b4c31a4e7bbf9c48d704 not found: ID does not exist" Dec 11 08:21:57 crc kubenswrapper[4881]: I1211 08:21:57.013856 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Dec 11 08:21:57 crc kubenswrapper[4881]: I1211 08:21:57.014720 4881 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Dec 11 08:21:57 crc kubenswrapper[4881]: I1211 08:21:57.028136 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 11 08:21:57 crc kubenswrapper[4881]: I1211 08:21:57.028392 4881 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="ebd940a6-6032-4465-b570-5c62f3c76ab5" Dec 11 08:21:57 crc kubenswrapper[4881]: I1211 08:21:57.032015 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 11 08:21:57 crc kubenswrapper[4881]: I1211 08:21:57.032050 4881 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="ebd940a6-6032-4465-b570-5c62f3c76ab5" Dec 11 08:21:59 crc kubenswrapper[4881]: I1211 08:21:59.401020 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:21:59 crc kubenswrapper[4881]: I1211 08:21:59.401138 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.100028 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zl8wv"] Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.100939 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" podUID="69071241-3547-43b1-bf14-5bb03184a08a" containerName="controller-manager" containerID="cri-o://70ff0aaec8f648b2dc4d291830b5d486b1802873c485f4f53ac14e413786f6c1" gracePeriod=30 Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.187410 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs"] Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.187695 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" podUID="317a7f01-2747-4994-8000-54613f522149" containerName="route-controller-manager" containerID="cri-o://58d7d7483976135ae4372f256517c1db5701fd4c7db66c21c7c91467bcfdb9c1" gracePeriod=30 Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.447032 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.495189 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rjg5j\" (UniqueName: \"kubernetes.io/projected/69071241-3547-43b1-bf14-5bb03184a08a-kube-api-access-rjg5j\") pod \"69071241-3547-43b1-bf14-5bb03184a08a\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.495256 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-client-ca\") pod \"69071241-3547-43b1-bf14-5bb03184a08a\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.495304 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-config\") pod \"69071241-3547-43b1-bf14-5bb03184a08a\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.495352 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-proxy-ca-bundles\") pod \"69071241-3547-43b1-bf14-5bb03184a08a\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.495429 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69071241-3547-43b1-bf14-5bb03184a08a-serving-cert\") pod \"69071241-3547-43b1-bf14-5bb03184a08a\" (UID: \"69071241-3547-43b1-bf14-5bb03184a08a\") " Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.496961 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-client-ca" (OuterVolumeSpecName: "client-ca") pod "69071241-3547-43b1-bf14-5bb03184a08a" (UID: "69071241-3547-43b1-bf14-5bb03184a08a"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.497570 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-config" (OuterVolumeSpecName: "config") pod "69071241-3547-43b1-bf14-5bb03184a08a" (UID: "69071241-3547-43b1-bf14-5bb03184a08a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.497918 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "69071241-3547-43b1-bf14-5bb03184a08a" (UID: "69071241-3547-43b1-bf14-5bb03184a08a"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.501395 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69071241-3547-43b1-bf14-5bb03184a08a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "69071241-3547-43b1-bf14-5bb03184a08a" (UID: "69071241-3547-43b1-bf14-5bb03184a08a"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.501580 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69071241-3547-43b1-bf14-5bb03184a08a-kube-api-access-rjg5j" (OuterVolumeSpecName: "kube-api-access-rjg5j") pod "69071241-3547-43b1-bf14-5bb03184a08a" (UID: "69071241-3547-43b1-bf14-5bb03184a08a"). InnerVolumeSpecName "kube-api-access-rjg5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.519559 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.544147 4881 generic.go:334] "Generic (PLEG): container finished" podID="317a7f01-2747-4994-8000-54613f522149" containerID="58d7d7483976135ae4372f256517c1db5701fd4c7db66c21c7c91467bcfdb9c1" exitCode=0 Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.544187 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.544202 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" event={"ID":"317a7f01-2747-4994-8000-54613f522149","Type":"ContainerDied","Data":"58d7d7483976135ae4372f256517c1db5701fd4c7db66c21c7c91467bcfdb9c1"} Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.544614 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs" event={"ID":"317a7f01-2747-4994-8000-54613f522149","Type":"ContainerDied","Data":"edfd292e6ad31da44c6945dc5412228428f94067ec0fbf0eddce1c0b98023e6b"} Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.544686 4881 scope.go:117] "RemoveContainer" containerID="58d7d7483976135ae4372f256517c1db5701fd4c7db66c21c7c91467bcfdb9c1" Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.552585 4881 generic.go:334] "Generic (PLEG): container finished" podID="69071241-3547-43b1-bf14-5bb03184a08a" containerID="70ff0aaec8f648b2dc4d291830b5d486b1802873c485f4f53ac14e413786f6c1" exitCode=0 Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.552613 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" event={"ID":"69071241-3547-43b1-bf14-5bb03184a08a","Type":"ContainerDied","Data":"70ff0aaec8f648b2dc4d291830b5d486b1802873c485f4f53ac14e413786f6c1"} Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.552649 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" event={"ID":"69071241-3547-43b1-bf14-5bb03184a08a","Type":"ContainerDied","Data":"3786245efae87026ed9e3a675598e3bf1b6dd1710c99ef6a13862685addf9161"} Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.552779 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zl8wv" Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.578458 4881 scope.go:117] "RemoveContainer" containerID="58d7d7483976135ae4372f256517c1db5701fd4c7db66c21c7c91467bcfdb9c1" Dec 11 08:22:06 crc kubenswrapper[4881]: E1211 08:22:06.579289 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58d7d7483976135ae4372f256517c1db5701fd4c7db66c21c7c91467bcfdb9c1\": container with ID starting with 58d7d7483976135ae4372f256517c1db5701fd4c7db66c21c7c91467bcfdb9c1 not found: ID does not exist" containerID="58d7d7483976135ae4372f256517c1db5701fd4c7db66c21c7c91467bcfdb9c1" Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.579317 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58d7d7483976135ae4372f256517c1db5701fd4c7db66c21c7c91467bcfdb9c1"} err="failed to get container status \"58d7d7483976135ae4372f256517c1db5701fd4c7db66c21c7c91467bcfdb9c1\": rpc error: code = NotFound desc = could not find container \"58d7d7483976135ae4372f256517c1db5701fd4c7db66c21c7c91467bcfdb9c1\": container with ID starting with 58d7d7483976135ae4372f256517c1db5701fd4c7db66c21c7c91467bcfdb9c1 not found: ID does not exist" Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.579361 4881 scope.go:117] "RemoveContainer" containerID="70ff0aaec8f648b2dc4d291830b5d486b1802873c485f4f53ac14e413786f6c1" Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.592304 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zl8wv"] Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.594354 4881 scope.go:117] "RemoveContainer" containerID="70ff0aaec8f648b2dc4d291830b5d486b1802873c485f4f53ac14e413786f6c1" Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.595226 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zl8wv"] Dec 11 08:22:06 crc kubenswrapper[4881]: E1211 08:22:06.595430 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70ff0aaec8f648b2dc4d291830b5d486b1802873c485f4f53ac14e413786f6c1\": container with ID starting with 70ff0aaec8f648b2dc4d291830b5d486b1802873c485f4f53ac14e413786f6c1 not found: ID does not exist" containerID="70ff0aaec8f648b2dc4d291830b5d486b1802873c485f4f53ac14e413786f6c1" Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.595485 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70ff0aaec8f648b2dc4d291830b5d486b1802873c485f4f53ac14e413786f6c1"} err="failed to get container status \"70ff0aaec8f648b2dc4d291830b5d486b1802873c485f4f53ac14e413786f6c1\": rpc error: code = NotFound desc = could not find container \"70ff0aaec8f648b2dc4d291830b5d486b1802873c485f4f53ac14e413786f6c1\": container with ID starting with 70ff0aaec8f648b2dc4d291830b5d486b1802873c485f4f53ac14e413786f6c1 not found: ID does not exist" Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.596681 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/317a7f01-2747-4994-8000-54613f522149-serving-cert\") pod \"317a7f01-2747-4994-8000-54613f522149\" (UID: \"317a7f01-2747-4994-8000-54613f522149\") " Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.596793 4881 
Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.596950 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/317a7f01-2747-4994-8000-54613f522149-client-ca\") pod \"317a7f01-2747-4994-8000-54613f522149\" (UID: \"317a7f01-2747-4994-8000-54613f522149\") "
Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.597030 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/317a7f01-2747-4994-8000-54613f522149-config\") pod \"317a7f01-2747-4994-8000-54613f522149\" (UID: \"317a7f01-2747-4994-8000-54613f522149\") "
Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.597368 4881 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-client-ca\") on node \"crc\" DevicePath \"\""
Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.597495 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-config\") on node \"crc\" DevicePath \"\""
Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.597645 4881 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/69071241-3547-43b1-bf14-5bb03184a08a-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.597837 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69071241-3547-43b1-bf14-5bb03184a08a-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.597912 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rjg5j\" (UniqueName: \"kubernetes.io/projected/69071241-3547-43b1-bf14-5bb03184a08a-kube-api-access-rjg5j\") on node \"crc\" DevicePath \"\""
Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.597667 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/317a7f01-2747-4994-8000-54613f522149-config" (OuterVolumeSpecName: "config") pod "317a7f01-2747-4994-8000-54613f522149" (UID: "317a7f01-2747-4994-8000-54613f522149"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.597673 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/317a7f01-2747-4994-8000-54613f522149-client-ca" (OuterVolumeSpecName: "client-ca") pod "317a7f01-2747-4994-8000-54613f522149" (UID: "317a7f01-2747-4994-8000-54613f522149"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.600408 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/317a7f01-2747-4994-8000-54613f522149-kube-api-access-7bxfc" (OuterVolumeSpecName: "kube-api-access-7bxfc") pod "317a7f01-2747-4994-8000-54613f522149" (UID: "317a7f01-2747-4994-8000-54613f522149"). InnerVolumeSpecName "kube-api-access-7bxfc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.600449 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/317a7f01-2747-4994-8000-54613f522149-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "317a7f01-2747-4994-8000-54613f522149" (UID: "317a7f01-2747-4994-8000-54613f522149"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.699398 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/317a7f01-2747-4994-8000-54613f522149-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.699436 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bxfc\" (UniqueName: \"kubernetes.io/projected/317a7f01-2747-4994-8000-54613f522149-kube-api-access-7bxfc\") on node \"crc\" DevicePath \"\""
Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.699451 4881 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/317a7f01-2747-4994-8000-54613f522149-client-ca\") on node \"crc\" DevicePath \"\""
Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.699465 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/317a7f01-2747-4994-8000-54613f522149-config\") on node \"crc\" DevicePath \"\""
Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.885359 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs"]
Dec 11 08:22:06 crc kubenswrapper[4881]: I1211 08:22:06.888964 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-882gs"]
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.013694 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="317a7f01-2747-4994-8000-54613f522149" path="/var/lib/kubelet/pods/317a7f01-2747-4994-8000-54613f522149/volumes"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.014273 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69071241-3547-43b1-bf14-5bb03184a08a" path="/var/lib/kubelet/pods/69071241-3547-43b1-bf14-5bb03184a08a/volumes"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.721319 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"]
Dec 11 08:22:07 crc kubenswrapper[4881]: E1211 08:22:07.721756 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69071241-3547-43b1-bf14-5bb03184a08a" containerName="controller-manager"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.721790 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="69071241-3547-43b1-bf14-5bb03184a08a" containerName="controller-manager"
Dec 11 08:22:07 crc kubenswrapper[4881]: E1211 08:22:07.721815 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.721827 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Dec 11 08:22:07 crc kubenswrapper[4881]: E1211 08:22:07.721866 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="317a7f01-2747-4994-8000-54613f522149" containerName="route-controller-manager"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.721879 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="317a7f01-2747-4994-8000-54613f522149" containerName="route-controller-manager"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.722029 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="69071241-3547-43b1-bf14-5bb03184a08a" containerName="controller-manager"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.722048 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.722063 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="317a7f01-2747-4994-8000-54613f522149" containerName="route-controller-manager"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.722698 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.729607 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.729705 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.731713 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.732731 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.733089 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.734981 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.736175 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-796b84794c-pcmjj"]
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.737903 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.742691 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.743106 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.743591 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.745200 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.745231 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.745427 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.760492 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"]
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.761558 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.764511 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-796b84794c-pcmjj"]
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.812472 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-client-ca\") pod \"route-controller-manager-96b64b5cc-d77sm\" (UID: \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\") " pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.812550 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8fzh\" (UniqueName: \"kubernetes.io/projected/85255ad8-c128-434e-9ea8-d34d21ba0523-kube-api-access-q8fzh\") pod \"controller-manager-796b84794c-pcmjj\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.812706 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-config\") pod \"controller-manager-796b84794c-pcmjj\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.812756 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/85255ad8-c128-434e-9ea8-d34d21ba0523-serving-cert\") pod \"controller-manager-796b84794c-pcmjj\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.812779 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-proxy-ca-bundles\") pod \"controller-manager-796b84794c-pcmjj\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.812826 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-config\") pod \"route-controller-manager-96b64b5cc-d77sm\" (UID: \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\") " pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.812872 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-client-ca\") pod \"controller-manager-796b84794c-pcmjj\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.812899 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-serving-cert\") pod \"route-controller-manager-96b64b5cc-d77sm\" (UID: \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\") " pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.812917 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbxpl\" (UniqueName: \"kubernetes.io/projected/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-kube-api-access-tbxpl\") pod \"route-controller-manager-96b64b5cc-d77sm\" (UID: \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\") " pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.914022 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-config\") pod \"controller-manager-796b84794c-pcmjj\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.914115 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/85255ad8-c128-434e-9ea8-d34d21ba0523-serving-cert\") pod \"controller-manager-796b84794c-pcmjj\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.914156 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-proxy-ca-bundles\") pod \"controller-manager-796b84794c-pcmjj\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.914207 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-config\") pod \"route-controller-manager-96b64b5cc-d77sm\" (UID: \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\") " pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.914262 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-client-ca\") pod \"controller-manager-796b84794c-pcmjj\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.914305 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-serving-cert\") pod \"route-controller-manager-96b64b5cc-d77sm\" (UID: \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\") " pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.914379 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbxpl\" (UniqueName: \"kubernetes.io/projected/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-kube-api-access-tbxpl\") pod \"route-controller-manager-96b64b5cc-d77sm\" (UID: \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\") " pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.914448 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-client-ca\") pod \"route-controller-manager-96b64b5cc-d77sm\" (UID: \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\") " pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.914556 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8fzh\" (UniqueName: \"kubernetes.io/projected/85255ad8-c128-434e-9ea8-d34d21ba0523-kube-api-access-q8fzh\") pod \"controller-manager-796b84794c-pcmjj\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.915852 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-proxy-ca-bundles\") pod \"controller-manager-796b84794c-pcmjj\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.917576 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-client-ca\") pod \"route-controller-manager-96b64b5cc-d77sm\" (UID: \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\") " pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.918468 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-config\") pod \"route-controller-manager-96b64b5cc-d77sm\" (UID: \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\") " pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.918827 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-config\") pod \"controller-manager-796b84794c-pcmjj\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.920073 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-serving-cert\") pod \"route-controller-manager-96b64b5cc-d77sm\" (UID: \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\") " pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.923118 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/85255ad8-c128-434e-9ea8-d34d21ba0523-serving-cert\") pod \"controller-manager-796b84794c-pcmjj\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.926815 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-client-ca\") pod \"controller-manager-796b84794c-pcmjj\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.950553 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbxpl\" (UniqueName: \"kubernetes.io/projected/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-kube-api-access-tbxpl\") pod \"route-controller-manager-96b64b5cc-d77sm\" (UID: \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\") " pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"
Dec 11 08:22:07 crc kubenswrapper[4881]: I1211 08:22:07.950628 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8fzh\" (UniqueName: \"kubernetes.io/projected/85255ad8-c128-434e-9ea8-d34d21ba0523-kube-api-access-q8fzh\") pod \"controller-manager-796b84794c-pcmjj\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:08 crc kubenswrapper[4881]: I1211 08:22:08.066955 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"
Dec 11 08:22:08 crc kubenswrapper[4881]: I1211 08:22:08.075011 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:08 crc kubenswrapper[4881]: I1211 08:22:08.135765 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-796b84794c-pcmjj"]
Dec 11 08:22:08 crc kubenswrapper[4881]: I1211 08:22:08.156285 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"]
Dec 11 08:22:08 crc kubenswrapper[4881]: I1211 08:22:08.366993 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"]
Dec 11 08:22:08 crc kubenswrapper[4881]: I1211 08:22:08.568939 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm" event={"ID":"787b8c2f-fb8b-4fb3-bebb-5f84a298516f","Type":"ContainerStarted","Data":"f99670873e6c870ebc3ef3b17a2e55ffccf7b05835c77fe86d98910d603084c1"}
Dec 11 08:22:08 crc kubenswrapper[4881]: I1211 08:22:08.569252 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm" event={"ID":"787b8c2f-fb8b-4fb3-bebb-5f84a298516f","Type":"ContainerStarted","Data":"be67f822ddbe0943c52a7a400c29b48d14655f3f18cf79f4ab7ba45ed88d101c"}
Dec 11 08:22:08 crc kubenswrapper[4881]: I1211 08:22:08.569268 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm" podUID="787b8c2f-fb8b-4fb3-bebb-5f84a298516f" containerName="route-controller-manager" containerID="cri-o://f99670873e6c870ebc3ef3b17a2e55ffccf7b05835c77fe86d98910d603084c1" gracePeriod=30
Dec 11 08:22:08 crc kubenswrapper[4881]: I1211 08:22:08.570914 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"
Dec 11 08:22:08 crc kubenswrapper[4881]: I1211 08:22:08.597212 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm" podStartSLOduration=2.5971911800000003 podStartE2EDuration="2.59719118s" podCreationTimestamp="2025-12-11 08:22:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:22:08.593851888 +0000 UTC m=+376.971220585" watchObservedRunningTime="2025-12-11 08:22:08.59719118 +0000 UTC m=+376.974559877"
Dec 11 08:22:08 crc kubenswrapper[4881]: I1211 08:22:08.637076 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-796b84794c-pcmjj"]
Dec 11 08:22:08 crc kubenswrapper[4881]: I1211 08:22:08.902935 4881 patch_prober.go:28] interesting pod/route-controller-manager-96b64b5cc-d77sm container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.63:8443/healthz\": read tcp 10.217.0.2:43282->10.217.0.63:8443: read: connection reset by peer" start-of-body=
Dec 11 08:22:08 crc kubenswrapper[4881]: I1211 08:22:08.903310 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm" podUID="787b8c2f-fb8b-4fb3-bebb-5f84a298516f" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.63:8443/healthz\": read tcp 10.217.0.2:43282->10.217.0.63:8443: read: connection reset by peer"
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.176923 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-route-controller-manager_route-controller-manager-96b64b5cc-d77sm_787b8c2f-fb8b-4fb3-bebb-5f84a298516f/route-controller-manager/0.log"
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.176998 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.232222 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tbxpl\" (UniqueName: \"kubernetes.io/projected/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-kube-api-access-tbxpl\") pod \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\" (UID: \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\") "
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.232275 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-serving-cert\") pod \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\" (UID: \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\") "
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.232317 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-client-ca\") pod \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\" (UID: \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\") "
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.232376 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-config\") pod \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\" (UID: \"787b8c2f-fb8b-4fb3-bebb-5f84a298516f\") "
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.233015 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-client-ca" (OuterVolumeSpecName: "client-ca") pod "787b8c2f-fb8b-4fb3-bebb-5f84a298516f" (UID: "787b8c2f-fb8b-4fb3-bebb-5f84a298516f"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.233051 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-config" (OuterVolumeSpecName: "config") pod "787b8c2f-fb8b-4fb3-bebb-5f84a298516f" (UID: "787b8c2f-fb8b-4fb3-bebb-5f84a298516f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.233361 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-config\") on node \"crc\" DevicePath \"\""
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.233386 4881 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-client-ca\") on node \"crc\" DevicePath \"\""
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.237981 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-kube-api-access-tbxpl" (OuterVolumeSpecName: "kube-api-access-tbxpl") pod "787b8c2f-fb8b-4fb3-bebb-5f84a298516f" (UID: "787b8c2f-fb8b-4fb3-bebb-5f84a298516f"). InnerVolumeSpecName "kube-api-access-tbxpl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.238598 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "787b8c2f-fb8b-4fb3-bebb-5f84a298516f" (UID: "787b8c2f-fb8b-4fb3-bebb-5f84a298516f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.334295 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tbxpl\" (UniqueName: \"kubernetes.io/projected/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-kube-api-access-tbxpl\") on node \"crc\" DevicePath \"\""
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.334328 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/787b8c2f-fb8b-4fb3-bebb-5f84a298516f-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.574917 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj" event={"ID":"85255ad8-c128-434e-9ea8-d34d21ba0523","Type":"ContainerStarted","Data":"9d8258aa58b8553ed474caf3675f89cc86a0084acd2f07bb0e19047a936cb0e4"}
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.575186 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj" event={"ID":"85255ad8-c128-434e-9ea8-d34d21ba0523","Type":"ContainerStarted","Data":"0fd455a3276227b7cc88e510630d84ec012644e62acb1135ff3a12ad76dcdf71"}
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.575330 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj" podUID="85255ad8-c128-434e-9ea8-d34d21ba0523" containerName="controller-manager" containerID="cri-o://9d8258aa58b8553ed474caf3675f89cc86a0084acd2f07bb0e19047a936cb0e4" gracePeriod=30
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.575819 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj"
Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.580144 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-route-controller-manager_route-controller-manager-96b64b5cc-d77sm_787b8c2f-fb8b-4fb3-bebb-5f84a298516f/route-controller-manager/0.log"
Dec
11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.580187 4881 generic.go:334] "Generic (PLEG): container finished" podID="787b8c2f-fb8b-4fb3-bebb-5f84a298516f" containerID="f99670873e6c870ebc3ef3b17a2e55ffccf7b05835c77fe86d98910d603084c1" exitCode=255 Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.580214 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm" event={"ID":"787b8c2f-fb8b-4fb3-bebb-5f84a298516f","Type":"ContainerDied","Data":"f99670873e6c870ebc3ef3b17a2e55ffccf7b05835c77fe86d98910d603084c1"} Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.580236 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm" event={"ID":"787b8c2f-fb8b-4fb3-bebb-5f84a298516f","Type":"ContainerDied","Data":"be67f822ddbe0943c52a7a400c29b48d14655f3f18cf79f4ab7ba45ed88d101c"} Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.580258 4881 scope.go:117] "RemoveContainer" containerID="f99670873e6c870ebc3ef3b17a2e55ffccf7b05835c77fe86d98910d603084c1" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.580392 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.609839 4881 patch_prober.go:28] interesting pod/controller-manager-796b84794c-pcmjj container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.64:8443/healthz\": read tcp 10.217.0.2:36286->10.217.0.64:8443: read: connection reset by peer" start-of-body= Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.609903 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj" podUID="85255ad8-c128-434e-9ea8-d34d21ba0523" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.64:8443/healthz\": read tcp 10.217.0.2:36286->10.217.0.64:8443: read: connection reset by peer" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.618822 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj" podStartSLOduration=3.618799132 podStartE2EDuration="3.618799132s" podCreationTimestamp="2025-12-11 08:22:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:22:09.618688469 +0000 UTC m=+377.996057196" watchObservedRunningTime="2025-12-11 08:22:09.618799132 +0000 UTC m=+377.996167839" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.620373 4881 scope.go:117] "RemoveContainer" containerID="f99670873e6c870ebc3ef3b17a2e55ffccf7b05835c77fe86d98910d603084c1" Dec 11 08:22:09 crc kubenswrapper[4881]: E1211 08:22:09.620946 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f99670873e6c870ebc3ef3b17a2e55ffccf7b05835c77fe86d98910d603084c1\": container with ID starting with f99670873e6c870ebc3ef3b17a2e55ffccf7b05835c77fe86d98910d603084c1 not found: ID does not exist" containerID="f99670873e6c870ebc3ef3b17a2e55ffccf7b05835c77fe86d98910d603084c1" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.620983 4881 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"f99670873e6c870ebc3ef3b17a2e55ffccf7b05835c77fe86d98910d603084c1"} err="failed to get container status \"f99670873e6c870ebc3ef3b17a2e55ffccf7b05835c77fe86d98910d603084c1\": rpc error: code = NotFound desc = could not find container \"f99670873e6c870ebc3ef3b17a2e55ffccf7b05835c77fe86d98910d603084c1\": container with ID starting with f99670873e6c870ebc3ef3b17a2e55ffccf7b05835c77fe86d98910d603084c1 not found: ID does not exist" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.638821 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"] Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.645765 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-96b64b5cc-d77sm"] Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.722262 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x"] Dec 11 08:22:09 crc kubenswrapper[4881]: E1211 08:22:09.722532 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="787b8c2f-fb8b-4fb3-bebb-5f84a298516f" containerName="route-controller-manager" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.722556 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="787b8c2f-fb8b-4fb3-bebb-5f84a298516f" containerName="route-controller-manager" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.722673 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="787b8c2f-fb8b-4fb3-bebb-5f84a298516f" containerName="route-controller-manager" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.723096 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.726374 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.726621 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.726629 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.726713 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.726844 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.726993 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.739384 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x"] Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.839531 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7tn5\" (UniqueName: \"kubernetes.io/projected/5dcb65e3-74f7-437f-b581-31b1b65af163-kube-api-access-d7tn5\") pod \"route-controller-manager-69c7cb958d-ddc8x\" (UID: \"5dcb65e3-74f7-437f-b581-31b1b65af163\") " pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.839657 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5dcb65e3-74f7-437f-b581-31b1b65af163-client-ca\") pod \"route-controller-manager-69c7cb958d-ddc8x\" (UID: \"5dcb65e3-74f7-437f-b581-31b1b65af163\") " pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.839716 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5dcb65e3-74f7-437f-b581-31b1b65af163-serving-cert\") pod \"route-controller-manager-69c7cb958d-ddc8x\" (UID: \"5dcb65e3-74f7-437f-b581-31b1b65af163\") " pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.839768 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5dcb65e3-74f7-437f-b581-31b1b65af163-config\") pod \"route-controller-manager-69c7cb958d-ddc8x\" (UID: \"5dcb65e3-74f7-437f-b581-31b1b65af163\") " pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.941392 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7tn5\" (UniqueName: \"kubernetes.io/projected/5dcb65e3-74f7-437f-b581-31b1b65af163-kube-api-access-d7tn5\") pod 
\"route-controller-manager-69c7cb958d-ddc8x\" (UID: \"5dcb65e3-74f7-437f-b581-31b1b65af163\") " pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.941674 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5dcb65e3-74f7-437f-b581-31b1b65af163-client-ca\") pod \"route-controller-manager-69c7cb958d-ddc8x\" (UID: \"5dcb65e3-74f7-437f-b581-31b1b65af163\") " pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.941750 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5dcb65e3-74f7-437f-b581-31b1b65af163-serving-cert\") pod \"route-controller-manager-69c7cb958d-ddc8x\" (UID: \"5dcb65e3-74f7-437f-b581-31b1b65af163\") " pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.942464 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5dcb65e3-74f7-437f-b581-31b1b65af163-config\") pod \"route-controller-manager-69c7cb958d-ddc8x\" (UID: \"5dcb65e3-74f7-437f-b581-31b1b65af163\") " pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.944167 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5dcb65e3-74f7-437f-b581-31b1b65af163-client-ca\") pod \"route-controller-manager-69c7cb958d-ddc8x\" (UID: \"5dcb65e3-74f7-437f-b581-31b1b65af163\") " pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.944522 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5dcb65e3-74f7-437f-b581-31b1b65af163-config\") pod \"route-controller-manager-69c7cb958d-ddc8x\" (UID: \"5dcb65e3-74f7-437f-b581-31b1b65af163\") " pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.946822 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5dcb65e3-74f7-437f-b581-31b1b65af163-serving-cert\") pod \"route-controller-manager-69c7cb958d-ddc8x\" (UID: \"5dcb65e3-74f7-437f-b581-31b1b65af163\") " pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.962535 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7tn5\" (UniqueName: \"kubernetes.io/projected/5dcb65e3-74f7-437f-b581-31b1b65af163-kube-api-access-d7tn5\") pod \"route-controller-manager-69c7cb958d-ddc8x\" (UID: \"5dcb65e3-74f7-437f-b581-31b1b65af163\") " pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:09 crc kubenswrapper[4881]: I1211 08:22:09.966518 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.043104 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-client-ca\") pod \"85255ad8-c128-434e-9ea8-d34d21ba0523\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.043160 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-proxy-ca-bundles\") pod \"85255ad8-c128-434e-9ea8-d34d21ba0523\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.043216 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q8fzh\" (UniqueName: \"kubernetes.io/projected/85255ad8-c128-434e-9ea8-d34d21ba0523-kube-api-access-q8fzh\") pod \"85255ad8-c128-434e-9ea8-d34d21ba0523\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.043243 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-config\") pod \"85255ad8-c128-434e-9ea8-d34d21ba0523\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.043271 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/85255ad8-c128-434e-9ea8-d34d21ba0523-serving-cert\") pod \"85255ad8-c128-434e-9ea8-d34d21ba0523\" (UID: \"85255ad8-c128-434e-9ea8-d34d21ba0523\") " Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.044004 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-client-ca" (OuterVolumeSpecName: "client-ca") pod "85255ad8-c128-434e-9ea8-d34d21ba0523" (UID: "85255ad8-c128-434e-9ea8-d34d21ba0523"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.044482 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "85255ad8-c128-434e-9ea8-d34d21ba0523" (UID: "85255ad8-c128-434e-9ea8-d34d21ba0523"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.044493 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-config" (OuterVolumeSpecName: "config") pod "85255ad8-c128-434e-9ea8-d34d21ba0523" (UID: "85255ad8-c128-434e-9ea8-d34d21ba0523"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.046210 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85255ad8-c128-434e-9ea8-d34d21ba0523-kube-api-access-q8fzh" (OuterVolumeSpecName: "kube-api-access-q8fzh") pod "85255ad8-c128-434e-9ea8-d34d21ba0523" (UID: "85255ad8-c128-434e-9ea8-d34d21ba0523"). InnerVolumeSpecName "kube-api-access-q8fzh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.047572 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85255ad8-c128-434e-9ea8-d34d21ba0523-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "85255ad8-c128-434e-9ea8-d34d21ba0523" (UID: "85255ad8-c128-434e-9ea8-d34d21ba0523"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.060031 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.144934 4881 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-client-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.144969 4881 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.144983 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q8fzh\" (UniqueName: \"kubernetes.io/projected/85255ad8-c128-434e-9ea8-d34d21ba0523-kube-api-access-q8fzh\") on node \"crc\" DevicePath \"\"" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.144995 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85255ad8-c128-434e-9ea8-d34d21ba0523-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.145006 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/85255ad8-c128-434e-9ea8-d34d21ba0523-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.243761 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x"] Dec 11 08:22:10 crc kubenswrapper[4881]: W1211 08:22:10.248717 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5dcb65e3_74f7_437f_b581_31b1b65af163.slice/crio-4ff5162b26edbc5d66ab678acf75809fcc509879e0f8354a50d9c5f47a0b40e6 WatchSource:0}: Error finding container 4ff5162b26edbc5d66ab678acf75809fcc509879e0f8354a50d9c5f47a0b40e6: Status 404 returned error can't find the container with id 4ff5162b26edbc5d66ab678acf75809fcc509879e0f8354a50d9c5f47a0b40e6 Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.588605 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" event={"ID":"5dcb65e3-74f7-437f-b581-31b1b65af163","Type":"ContainerStarted","Data":"506c6d28b935d934cec5a02fec1ef31376d51c4e1f423fa4cbef82200f1dbeff"} Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.588677 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" event={"ID":"5dcb65e3-74f7-437f-b581-31b1b65af163","Type":"ContainerStarted","Data":"4ff5162b26edbc5d66ab678acf75809fcc509879e0f8354a50d9c5f47a0b40e6"} Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.588934 4881 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.590069 4881 generic.go:334] "Generic (PLEG): container finished" podID="85255ad8-c128-434e-9ea8-d34d21ba0523" containerID="9d8258aa58b8553ed474caf3675f89cc86a0084acd2f07bb0e19047a936cb0e4" exitCode=0 Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.590101 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj" event={"ID":"85255ad8-c128-434e-9ea8-d34d21ba0523","Type":"ContainerDied","Data":"9d8258aa58b8553ed474caf3675f89cc86a0084acd2f07bb0e19047a936cb0e4"} Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.590118 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj" event={"ID":"85255ad8-c128-434e-9ea8-d34d21ba0523","Type":"ContainerDied","Data":"0fd455a3276227b7cc88e510630d84ec012644e62acb1135ff3a12ad76dcdf71"} Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.590134 4881 scope.go:117] "RemoveContainer" containerID="9d8258aa58b8553ed474caf3675f89cc86a0084acd2f07bb0e19047a936cb0e4" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.590137 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-796b84794c-pcmjj" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.617490 4881 scope.go:117] "RemoveContainer" containerID="9d8258aa58b8553ed474caf3675f89cc86a0084acd2f07bb0e19047a936cb0e4" Dec 11 08:22:10 crc kubenswrapper[4881]: E1211 08:22:10.618083 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d8258aa58b8553ed474caf3675f89cc86a0084acd2f07bb0e19047a936cb0e4\": container with ID starting with 9d8258aa58b8553ed474caf3675f89cc86a0084acd2f07bb0e19047a936cb0e4 not found: ID does not exist" containerID="9d8258aa58b8553ed474caf3675f89cc86a0084acd2f07bb0e19047a936cb0e4" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.618142 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d8258aa58b8553ed474caf3675f89cc86a0084acd2f07bb0e19047a936cb0e4"} err="failed to get container status \"9d8258aa58b8553ed474caf3675f89cc86a0084acd2f07bb0e19047a936cb0e4\": rpc error: code = NotFound desc = could not find container \"9d8258aa58b8553ed474caf3675f89cc86a0084acd2f07bb0e19047a936cb0e4\": container with ID starting with 9d8258aa58b8553ed474caf3675f89cc86a0084acd2f07bb0e19047a936cb0e4 not found: ID does not exist" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.638652 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" podStartSLOduration=2.638627311 podStartE2EDuration="2.638627311s" podCreationTimestamp="2025-12-11 08:22:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:22:10.611307123 +0000 UTC m=+378.988675820" watchObservedRunningTime="2025-12-11 08:22:10.638627311 +0000 UTC m=+379.015996008" Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.648534 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-796b84794c-pcmjj"] Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 
08:22:10.659434 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-796b84794c-pcmjj"] Dec 11 08:22:10 crc kubenswrapper[4881]: I1211 08:22:10.691280 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.012047 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="787b8c2f-fb8b-4fb3-bebb-5f84a298516f" path="/var/lib/kubelet/pods/787b8c2f-fb8b-4fb3-bebb-5f84a298516f/volumes" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.012843 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85255ad8-c128-434e-9ea8-d34d21ba0523" path="/var/lib/kubelet/pods/85255ad8-c128-434e-9ea8-d34d21ba0523/volumes" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.722555 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-84765f478-g259n"] Dec 11 08:22:11 crc kubenswrapper[4881]: E1211 08:22:11.722771 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85255ad8-c128-434e-9ea8-d34d21ba0523" containerName="controller-manager" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.722784 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="85255ad8-c128-434e-9ea8-d34d21ba0523" containerName="controller-manager" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.722885 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="85255ad8-c128-434e-9ea8-d34d21ba0523" containerName="controller-manager" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.723274 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.726799 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.727032 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.727268 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.727286 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.727721 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.731919 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.743135 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-84765f478-g259n"] Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.743146 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.868156 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-proxy-ca-bundles\") pod \"controller-manager-84765f478-g259n\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.868218 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-client-ca\") pod \"controller-manager-84765f478-g259n\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.868458 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08540012-842b-42a3-983b-231d761c3a80-serving-cert\") pod \"controller-manager-84765f478-g259n\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.868815 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-config\") pod \"controller-manager-84765f478-g259n\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.868873 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dff9q\" (UniqueName: \"kubernetes.io/projected/08540012-842b-42a3-983b-231d761c3a80-kube-api-access-dff9q\") pod \"controller-manager-84765f478-g259n\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.969859 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-proxy-ca-bundles\") pod \"controller-manager-84765f478-g259n\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.969926 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-client-ca\") pod \"controller-manager-84765f478-g259n\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.970027 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08540012-842b-42a3-983b-231d761c3a80-serving-cert\") pod \"controller-manager-84765f478-g259n\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.970077 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-config\") pod \"controller-manager-84765f478-g259n\" (UID: 
\"08540012-842b-42a3-983b-231d761c3a80\") " pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.970111 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dff9q\" (UniqueName: \"kubernetes.io/projected/08540012-842b-42a3-983b-231d761c3a80-kube-api-access-dff9q\") pod \"controller-manager-84765f478-g259n\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.971024 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-client-ca\") pod \"controller-manager-84765f478-g259n\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.971289 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-proxy-ca-bundles\") pod \"controller-manager-84765f478-g259n\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.972099 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-config\") pod \"controller-manager-84765f478-g259n\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.977154 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08540012-842b-42a3-983b-231d761c3a80-serving-cert\") pod \"controller-manager-84765f478-g259n\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:11 crc kubenswrapper[4881]: I1211 08:22:11.987204 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dff9q\" (UniqueName: \"kubernetes.io/projected/08540012-842b-42a3-983b-231d761c3a80-kube-api-access-dff9q\") pod \"controller-manager-84765f478-g259n\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:12 crc kubenswrapper[4881]: I1211 08:22:12.086934 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:12 crc kubenswrapper[4881]: I1211 08:22:12.294235 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-84765f478-g259n"] Dec 11 08:22:12 crc kubenswrapper[4881]: I1211 08:22:12.605019 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-84765f478-g259n" event={"ID":"08540012-842b-42a3-983b-231d761c3a80","Type":"ContainerStarted","Data":"f2d677d8bedb5b037c9674e9def057c2493247e6ce4e804990a563edc02e3248"} Dec 11 08:22:12 crc kubenswrapper[4881]: I1211 08:22:12.605078 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-84765f478-g259n" event={"ID":"08540012-842b-42a3-983b-231d761c3a80","Type":"ContainerStarted","Data":"05f4b2721175802342a73ceb1a837b2fca3c0a6f11ca84f6c625c04738896fff"} Dec 11 08:22:12 crc kubenswrapper[4881]: I1211 08:22:12.630446 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-84765f478-g259n" podStartSLOduration=4.630426308 podStartE2EDuration="4.630426308s" podCreationTimestamp="2025-12-11 08:22:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:22:12.627812695 +0000 UTC m=+381.005181392" watchObservedRunningTime="2025-12-11 08:22:12.630426308 +0000 UTC m=+381.007795025" Dec 11 08:22:13 crc kubenswrapper[4881]: I1211 08:22:13.612291 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:13 crc kubenswrapper[4881]: I1211 08:22:13.619449 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.264682 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-67h8h"] Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.266022 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.279428 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-67h8h"] Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.363525 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.363619 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8dc4f2f6-204f-44a0-b7de-9258e15d455d-registry-tls\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.363653 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8dc4f2f6-204f-44a0-b7de-9258e15d455d-registry-certificates\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.363726 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8dc4f2f6-204f-44a0-b7de-9258e15d455d-bound-sa-token\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.363754 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8dc4f2f6-204f-44a0-b7de-9258e15d455d-trusted-ca\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.363789 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8dc4f2f6-204f-44a0-b7de-9258e15d455d-installation-pull-secrets\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.363819 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8dc4f2f6-204f-44a0-b7de-9258e15d455d-ca-trust-extracted\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.363839 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdx2p\" (UniqueName: 
\"kubernetes.io/projected/8dc4f2f6-204f-44a0-b7de-9258e15d455d-kube-api-access-rdx2p\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.390108 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.464761 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8dc4f2f6-204f-44a0-b7de-9258e15d455d-ca-trust-extracted\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.464857 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdx2p\" (UniqueName: \"kubernetes.io/projected/8dc4f2f6-204f-44a0-b7de-9258e15d455d-kube-api-access-rdx2p\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.464998 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8dc4f2f6-204f-44a0-b7de-9258e15d455d-registry-tls\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.465041 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8dc4f2f6-204f-44a0-b7de-9258e15d455d-registry-certificates\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.465168 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8dc4f2f6-204f-44a0-b7de-9258e15d455d-bound-sa-token\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.465223 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8dc4f2f6-204f-44a0-b7de-9258e15d455d-trusted-ca\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.465287 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8dc4f2f6-204f-44a0-b7de-9258e15d455d-installation-pull-secrets\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.466222 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8dc4f2f6-204f-44a0-b7de-9258e15d455d-ca-trust-extracted\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.467687 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8dc4f2f6-204f-44a0-b7de-9258e15d455d-trusted-ca\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.469230 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8dc4f2f6-204f-44a0-b7de-9258e15d455d-registry-certificates\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.477883 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8dc4f2f6-204f-44a0-b7de-9258e15d455d-registry-tls\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.479747 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8dc4f2f6-204f-44a0-b7de-9258e15d455d-installation-pull-secrets\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.484240 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8dc4f2f6-204f-44a0-b7de-9258e15d455d-bound-sa-token\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.484680 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdx2p\" (UniqueName: \"kubernetes.io/projected/8dc4f2f6-204f-44a0-b7de-9258e15d455d-kube-api-access-rdx2p\") pod \"image-registry-66df7c8f76-67h8h\" (UID: \"8dc4f2f6-204f-44a0-b7de-9258e15d455d\") " pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:18 crc kubenswrapper[4881]: I1211 08:22:18.583555 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:19 crc kubenswrapper[4881]: I1211 08:22:19.003000 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-67h8h"] Dec 11 08:22:19 crc kubenswrapper[4881]: I1211 08:22:19.644746 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" event={"ID":"8dc4f2f6-204f-44a0-b7de-9258e15d455d","Type":"ContainerStarted","Data":"d752d5d633af23a8c07b80bb08880b40e68e8924381fa56a765dd6060c14df07"} Dec 11 08:22:19 crc kubenswrapper[4881]: I1211 08:22:19.644990 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" event={"ID":"8dc4f2f6-204f-44a0-b7de-9258e15d455d","Type":"ContainerStarted","Data":"3406ff6a877d28f3dc4f33c46aec0ab22c257855e6f3fca33e20aaf11df8f793"} Dec 11 08:22:19 crc kubenswrapper[4881]: I1211 08:22:19.645982 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.591077 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" podStartSLOduration=3.591054334 podStartE2EDuration="3.591054334s" podCreationTimestamp="2025-12-11 08:22:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:22:19.673223745 +0000 UTC m=+388.050592442" watchObservedRunningTime="2025-12-11 08:22:21.591054334 +0000 UTC m=+389.968423031" Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.596696 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-xzhfx"] Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.598413 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-xzhfx" Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.603162 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-root-ca.crt" Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.603295 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"telemetry-config" Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.603179 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"openshift-service-ca.crt" Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.603497 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-tls" Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.603570 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-dockercfg-wwt9l" Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.609195 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-xzhfx"] Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.728224 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5lns\" (UniqueName: \"kubernetes.io/projected/d841e0c1-830c-42bb-973d-62a30f17b3e8-kube-api-access-l5lns\") pod \"cluster-monitoring-operator-6d5b84845-xzhfx\" (UID: \"d841e0c1-830c-42bb-973d-62a30f17b3e8\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-xzhfx" Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.728309 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/d841e0c1-830c-42bb-973d-62a30f17b3e8-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-xzhfx\" (UID: \"d841e0c1-830c-42bb-973d-62a30f17b3e8\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-xzhfx" Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.728463 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/d841e0c1-830c-42bb-973d-62a30f17b3e8-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-xzhfx\" (UID: \"d841e0c1-830c-42bb-973d-62a30f17b3e8\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-xzhfx" Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.829442 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5lns\" (UniqueName: \"kubernetes.io/projected/d841e0c1-830c-42bb-973d-62a30f17b3e8-kube-api-access-l5lns\") pod \"cluster-monitoring-operator-6d5b84845-xzhfx\" (UID: \"d841e0c1-830c-42bb-973d-62a30f17b3e8\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-xzhfx" Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.829531 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/d841e0c1-830c-42bb-973d-62a30f17b3e8-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-xzhfx\" (UID: \"d841e0c1-830c-42bb-973d-62a30f17b3e8\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-xzhfx" Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 
08:22:21.829628 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/d841e0c1-830c-42bb-973d-62a30f17b3e8-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-xzhfx\" (UID: \"d841e0c1-830c-42bb-973d-62a30f17b3e8\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-xzhfx" Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.831285 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/d841e0c1-830c-42bb-973d-62a30f17b3e8-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-xzhfx\" (UID: \"d841e0c1-830c-42bb-973d-62a30f17b3e8\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-xzhfx" Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.845737 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/d841e0c1-830c-42bb-973d-62a30f17b3e8-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-xzhfx\" (UID: \"d841e0c1-830c-42bb-973d-62a30f17b3e8\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-xzhfx" Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.848774 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5lns\" (UniqueName: \"kubernetes.io/projected/d841e0c1-830c-42bb-973d-62a30f17b3e8-kube-api-access-l5lns\") pod \"cluster-monitoring-operator-6d5b84845-xzhfx\" (UID: \"d841e0c1-830c-42bb-973d-62a30f17b3e8\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-xzhfx" Dec 11 08:22:21 crc kubenswrapper[4881]: I1211 08:22:21.927540 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-xzhfx" Dec 11 08:22:22 crc kubenswrapper[4881]: I1211 08:22:22.321163 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-xzhfx"] Dec 11 08:22:22 crc kubenswrapper[4881]: W1211 08:22:22.324209 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd841e0c1_830c_42bb_973d_62a30f17b3e8.slice/crio-53d5b938e7bcc8b3111691388d7c8b05237b2cbf794680f8117c552ae3b409df WatchSource:0}: Error finding container 53d5b938e7bcc8b3111691388d7c8b05237b2cbf794680f8117c552ae3b409df: Status 404 returned error can't find the container with id 53d5b938e7bcc8b3111691388d7c8b05237b2cbf794680f8117c552ae3b409df Dec 11 08:22:22 crc kubenswrapper[4881]: I1211 08:22:22.662527 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-xzhfx" event={"ID":"d841e0c1-830c-42bb-973d-62a30f17b3e8","Type":"ContainerStarted","Data":"53d5b938e7bcc8b3111691388d7c8b05237b2cbf794680f8117c552ae3b409df"} Dec 11 08:22:25 crc kubenswrapper[4881]: I1211 08:22:25.185272 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg"] Dec 11 08:22:25 crc kubenswrapper[4881]: I1211 08:22:25.186145 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg" Dec 11 08:22:25 crc kubenswrapper[4881]: I1211 08:22:25.188671 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-tls" Dec 11 08:22:25 crc kubenswrapper[4881]: I1211 08:22:25.188848 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-dockercfg-mkjwq" Dec 11 08:22:25 crc kubenswrapper[4881]: I1211 08:22:25.194363 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg"] Dec 11 08:22:25 crc kubenswrapper[4881]: I1211 08:22:25.281288 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-b52lg\" (UID: \"cc8120de-b56f-481f-9ac5-19235df13216\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg" Dec 11 08:22:25 crc kubenswrapper[4881]: I1211 08:22:25.383026 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-b52lg\" (UID: \"cc8120de-b56f-481f-9ac5-19235df13216\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg" Dec 11 08:22:25 crc kubenswrapper[4881]: E1211 08:22:25.383205 4881 secret.go:188] Couldn't get secret openshift-monitoring/prometheus-operator-admission-webhook-tls: secret "prometheus-operator-admission-webhook-tls" not found Dec 11 08:22:25 crc kubenswrapper[4881]: E1211 08:22:25.383488 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates podName:cc8120de-b56f-481f-9ac5-19235df13216 nodeName:}" failed. No retries permitted until 2025-12-11 08:22:25.883424939 +0000 UTC m=+394.260793636 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-certificates" (UniqueName: "kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates") pod "prometheus-operator-admission-webhook-f54c54754-b52lg" (UID: "cc8120de-b56f-481f-9ac5-19235df13216") : secret "prometheus-operator-admission-webhook-tls" not found Dec 11 08:22:25 crc kubenswrapper[4881]: I1211 08:22:25.685357 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-xzhfx" event={"ID":"d841e0c1-830c-42bb-973d-62a30f17b3e8","Type":"ContainerStarted","Data":"1bda4d150972e655a83b2532b08f2a5cd10523387d1a1447fef9aaad8ac94c7b"} Dec 11 08:22:25 crc kubenswrapper[4881]: I1211 08:22:25.698914 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-xzhfx" podStartSLOduration=2.4563951680000002 podStartE2EDuration="4.698895283s" podCreationTimestamp="2025-12-11 08:22:21 +0000 UTC" firstStartedPulling="2025-12-11 08:22:22.326698627 +0000 UTC m=+390.704067324" lastFinishedPulling="2025-12-11 08:22:24.569198742 +0000 UTC m=+392.946567439" observedRunningTime="2025-12-11 08:22:25.696799882 +0000 UTC m=+394.074168579" watchObservedRunningTime="2025-12-11 08:22:25.698895283 +0000 UTC m=+394.076263980" Dec 11 08:22:25 crc kubenswrapper[4881]: I1211 08:22:25.889236 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-b52lg\" (UID: \"cc8120de-b56f-481f-9ac5-19235df13216\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg" Dec 11 08:22:25 crc kubenswrapper[4881]: E1211 08:22:25.889501 4881 secret.go:188] Couldn't get secret openshift-monitoring/prometheus-operator-admission-webhook-tls: secret "prometheus-operator-admission-webhook-tls" not found Dec 11 08:22:25 crc kubenswrapper[4881]: E1211 08:22:25.889600 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates podName:cc8120de-b56f-481f-9ac5-19235df13216 nodeName:}" failed. No retries permitted until 2025-12-11 08:22:26.889577871 +0000 UTC m=+395.266946578 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "tls-certificates" (UniqueName: "kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates") pod "prometheus-operator-admission-webhook-f54c54754-b52lg" (UID: "cc8120de-b56f-481f-9ac5-19235df13216") : secret "prometheus-operator-admission-webhook-tls" not found Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.092184 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-84765f478-g259n"] Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.092543 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-84765f478-g259n" podUID="08540012-842b-42a3-983b-231d761c3a80" containerName="controller-manager" containerID="cri-o://f2d677d8bedb5b037c9674e9def057c2493247e6ce4e804990a563edc02e3248" gracePeriod=30 Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.656119 4881 util.go:48] "No ready sandbox for pod can be found. 
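The startup-latency entry above is internally consistent: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (4.698895283s), and subtracting the image-pull window (lastFinishedPulling minus firstStartedPulling, 2.242500115s) gives podStartSLOduration (2.456395168s). A minimal Go sketch of that arithmetic, using the timestamps from the entry; the relationship is inferred from the log's own numbers, not taken from kubelet source:

// Sketch only: verify podStartSLOduration = podStartE2EDuration - pull window,
// using values copied from the pod_startup_latency_tracker entry above.
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	firstStartedPulling := parse("2025-12-11 08:22:22.326698627 +0000 UTC")
	lastFinishedPulling := parse("2025-12-11 08:22:24.569198742 +0000 UTC")

	e2e := 4698895283 * time.Nanosecond // podStartE2EDuration="4.698895283s"
	pull := lastFinishedPulling.Sub(firstStartedPulling)

	fmt.Println(e2e - pull) // prints 2.456395168s, matching podStartSLOduration
}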
Need to start a new one" pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.690457 4881 generic.go:334] "Generic (PLEG): container finished" podID="08540012-842b-42a3-983b-231d761c3a80" containerID="f2d677d8bedb5b037c9674e9def057c2493247e6ce4e804990a563edc02e3248" exitCode=0 Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.690522 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-84765f478-g259n" Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.690566 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-84765f478-g259n" event={"ID":"08540012-842b-42a3-983b-231d761c3a80","Type":"ContainerDied","Data":"f2d677d8bedb5b037c9674e9def057c2493247e6ce4e804990a563edc02e3248"} Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.690626 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-84765f478-g259n" event={"ID":"08540012-842b-42a3-983b-231d761c3a80","Type":"ContainerDied","Data":"05f4b2721175802342a73ceb1a837b2fca3c0a6f11ca84f6c625c04738896fff"} Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.690648 4881 scope.go:117] "RemoveContainer" containerID="f2d677d8bedb5b037c9674e9def057c2493247e6ce4e804990a563edc02e3248" Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.706857 4881 scope.go:117] "RemoveContainer" containerID="f2d677d8bedb5b037c9674e9def057c2493247e6ce4e804990a563edc02e3248" Dec 11 08:22:26 crc kubenswrapper[4881]: E1211 08:22:26.709313 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2d677d8bedb5b037c9674e9def057c2493247e6ce4e804990a563edc02e3248\": container with ID starting with f2d677d8bedb5b037c9674e9def057c2493247e6ce4e804990a563edc02e3248 not found: ID does not exist" containerID="f2d677d8bedb5b037c9674e9def057c2493247e6ce4e804990a563edc02e3248" Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.709371 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2d677d8bedb5b037c9674e9def057c2493247e6ce4e804990a563edc02e3248"} err="failed to get container status \"f2d677d8bedb5b037c9674e9def057c2493247e6ce4e804990a563edc02e3248\": rpc error: code = NotFound desc = could not find container \"f2d677d8bedb5b037c9674e9def057c2493247e6ce4e804990a563edc02e3248\": container with ID starting with f2d677d8bedb5b037c9674e9def057c2493247e6ce4e804990a563edc02e3248 not found: ID does not exist" Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.802541 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-config\") pod \"08540012-842b-42a3-983b-231d761c3a80\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.802604 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-proxy-ca-bundles\") pod \"08540012-842b-42a3-983b-231d761c3a80\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.802687 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dff9q\" (UniqueName: 
\"kubernetes.io/projected/08540012-842b-42a3-983b-231d761c3a80-kube-api-access-dff9q\") pod \"08540012-842b-42a3-983b-231d761c3a80\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.802743 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08540012-842b-42a3-983b-231d761c3a80-serving-cert\") pod \"08540012-842b-42a3-983b-231d761c3a80\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.802769 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-client-ca\") pod \"08540012-842b-42a3-983b-231d761c3a80\" (UID: \"08540012-842b-42a3-983b-231d761c3a80\") " Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.803816 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-client-ca" (OuterVolumeSpecName: "client-ca") pod "08540012-842b-42a3-983b-231d761c3a80" (UID: "08540012-842b-42a3-983b-231d761c3a80"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.804046 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "08540012-842b-42a3-983b-231d761c3a80" (UID: "08540012-842b-42a3-983b-231d761c3a80"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.804359 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-config" (OuterVolumeSpecName: "config") pod "08540012-842b-42a3-983b-231d761c3a80" (UID: "08540012-842b-42a3-983b-231d761c3a80"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.808578 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08540012-842b-42a3-983b-231d761c3a80-kube-api-access-dff9q" (OuterVolumeSpecName: "kube-api-access-dff9q") pod "08540012-842b-42a3-983b-231d761c3a80" (UID: "08540012-842b-42a3-983b-231d761c3a80"). InnerVolumeSpecName "kube-api-access-dff9q". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.808831 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08540012-842b-42a3-983b-231d761c3a80-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "08540012-842b-42a3-983b-231d761c3a80" (UID: "08540012-842b-42a3-983b-231d761c3a80"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.904322 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-b52lg\" (UID: \"cc8120de-b56f-481f-9ac5-19235df13216\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg" Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.904464 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dff9q\" (UniqueName: \"kubernetes.io/projected/08540012-842b-42a3-983b-231d761c3a80-kube-api-access-dff9q\") on node \"crc\" DevicePath \"\"" Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.904476 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08540012-842b-42a3-983b-231d761c3a80-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.904488 4881 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-client-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:22:26 crc kubenswrapper[4881]: E1211 08:22:26.904493 4881 secret.go:188] Couldn't get secret openshift-monitoring/prometheus-operator-admission-webhook-tls: secret "prometheus-operator-admission-webhook-tls" not found Dec 11 08:22:26 crc kubenswrapper[4881]: E1211 08:22:26.904551 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates podName:cc8120de-b56f-481f-9ac5-19235df13216 nodeName:}" failed. No retries permitted until 2025-12-11 08:22:28.904535981 +0000 UTC m=+397.281904678 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "tls-certificates" (UniqueName: "kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates") pod "prometheus-operator-admission-webhook-f54c54754-b52lg" (UID: "cc8120de-b56f-481f-9ac5-19235df13216") : secret "prometheus-operator-admission-webhook-tls" not found Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.904498 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:22:26 crc kubenswrapper[4881]: I1211 08:22:26.904618 4881 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/08540012-842b-42a3-983b-231d761c3a80-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.048788 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-84765f478-g259n"] Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.057180 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-84765f478-g259n"] Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.728968 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6d79d95bbf-rpz78"] Dec 11 08:22:27 crc kubenswrapper[4881]: E1211 08:22:27.729153 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08540012-842b-42a3-983b-231d761c3a80" containerName="controller-manager" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.729164 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="08540012-842b-42a3-983b-231d761c3a80" containerName="controller-manager" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.729261 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="08540012-842b-42a3-983b-231d761c3a80" containerName="controller-manager" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.729600 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.733267 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.733607 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.734547 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.734719 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.735470 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.736138 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.750013 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.755087 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6d79d95bbf-rpz78"] Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.816718 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e5cf2194-857a-4f41-a925-3ee960c29134-proxy-ca-bundles\") pod \"controller-manager-6d79d95bbf-rpz78\" (UID: \"e5cf2194-857a-4f41-a925-3ee960c29134\") " pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.816766 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e5cf2194-857a-4f41-a925-3ee960c29134-serving-cert\") pod \"controller-manager-6d79d95bbf-rpz78\" (UID: \"e5cf2194-857a-4f41-a925-3ee960c29134\") " pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.816788 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e5cf2194-857a-4f41-a925-3ee960c29134-client-ca\") pod \"controller-manager-6d79d95bbf-rpz78\" (UID: \"e5cf2194-857a-4f41-a925-3ee960c29134\") " pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.816807 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5cf2194-857a-4f41-a925-3ee960c29134-config\") pod \"controller-manager-6d79d95bbf-rpz78\" (UID: \"e5cf2194-857a-4f41-a925-3ee960c29134\") " pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.817041 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrh4g\" (UniqueName: 
\"kubernetes.io/projected/e5cf2194-857a-4f41-a925-3ee960c29134-kube-api-access-nrh4g\") pod \"controller-manager-6d79d95bbf-rpz78\" (UID: \"e5cf2194-857a-4f41-a925-3ee960c29134\") " pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.918527 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrh4g\" (UniqueName: \"kubernetes.io/projected/e5cf2194-857a-4f41-a925-3ee960c29134-kube-api-access-nrh4g\") pod \"controller-manager-6d79d95bbf-rpz78\" (UID: \"e5cf2194-857a-4f41-a925-3ee960c29134\") " pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.918606 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e5cf2194-857a-4f41-a925-3ee960c29134-proxy-ca-bundles\") pod \"controller-manager-6d79d95bbf-rpz78\" (UID: \"e5cf2194-857a-4f41-a925-3ee960c29134\") " pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.918626 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e5cf2194-857a-4f41-a925-3ee960c29134-serving-cert\") pod \"controller-manager-6d79d95bbf-rpz78\" (UID: \"e5cf2194-857a-4f41-a925-3ee960c29134\") " pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.918653 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e5cf2194-857a-4f41-a925-3ee960c29134-client-ca\") pod \"controller-manager-6d79d95bbf-rpz78\" (UID: \"e5cf2194-857a-4f41-a925-3ee960c29134\") " pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.918685 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5cf2194-857a-4f41-a925-3ee960c29134-config\") pod \"controller-manager-6d79d95bbf-rpz78\" (UID: \"e5cf2194-857a-4f41-a925-3ee960c29134\") " pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.919802 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e5cf2194-857a-4f41-a925-3ee960c29134-client-ca\") pod \"controller-manager-6d79d95bbf-rpz78\" (UID: \"e5cf2194-857a-4f41-a925-3ee960c29134\") " pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.919829 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e5cf2194-857a-4f41-a925-3ee960c29134-proxy-ca-bundles\") pod \"controller-manager-6d79d95bbf-rpz78\" (UID: \"e5cf2194-857a-4f41-a925-3ee960c29134\") " pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.920031 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5cf2194-857a-4f41-a925-3ee960c29134-config\") pod \"controller-manager-6d79d95bbf-rpz78\" (UID: \"e5cf2194-857a-4f41-a925-3ee960c29134\") " 
pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.923920 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e5cf2194-857a-4f41-a925-3ee960c29134-serving-cert\") pod \"controller-manager-6d79d95bbf-rpz78\" (UID: \"e5cf2194-857a-4f41-a925-3ee960c29134\") " pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:27 crc kubenswrapper[4881]: I1211 08:22:27.941112 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrh4g\" (UniqueName: \"kubernetes.io/projected/e5cf2194-857a-4f41-a925-3ee960c29134-kube-api-access-nrh4g\") pod \"controller-manager-6d79d95bbf-rpz78\" (UID: \"e5cf2194-857a-4f41-a925-3ee960c29134\") " pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:28 crc kubenswrapper[4881]: I1211 08:22:28.043251 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:28 crc kubenswrapper[4881]: I1211 08:22:28.236280 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6d79d95bbf-rpz78"] Dec 11 08:22:28 crc kubenswrapper[4881]: W1211 08:22:28.242063 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode5cf2194_857a_4f41_a925_3ee960c29134.slice/crio-cd07a6675b46b1dc3d78b045e0f17a236483aa543ca41c57c163a7c4827312de WatchSource:0}: Error finding container cd07a6675b46b1dc3d78b045e0f17a236483aa543ca41c57c163a7c4827312de: Status 404 returned error can't find the container with id cd07a6675b46b1dc3d78b045e0f17a236483aa543ca41c57c163a7c4827312de Dec 11 08:22:28 crc kubenswrapper[4881]: I1211 08:22:28.701792 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" event={"ID":"e5cf2194-857a-4f41-a925-3ee960c29134","Type":"ContainerStarted","Data":"8d65442ec0767379cdcb84dc5d910c4bd47eb8f9552921190d510eb215a1cc7a"} Dec 11 08:22:28 crc kubenswrapper[4881]: I1211 08:22:28.702307 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" event={"ID":"e5cf2194-857a-4f41-a925-3ee960c29134","Type":"ContainerStarted","Data":"cd07a6675b46b1dc3d78b045e0f17a236483aa543ca41c57c163a7c4827312de"} Dec 11 08:22:28 crc kubenswrapper[4881]: I1211 08:22:28.702436 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:28 crc kubenswrapper[4881]: I1211 08:22:28.706308 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" Dec 11 08:22:28 crc kubenswrapper[4881]: I1211 08:22:28.728773 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" podStartSLOduration=2.728736186 podStartE2EDuration="2.728736186s" podCreationTimestamp="2025-12-11 08:22:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:22:28.724204906 +0000 UTC m=+397.101573603" watchObservedRunningTime="2025-12-11 08:22:28.728736186 +0000 UTC m=+397.106104893" Dec 11 08:22:28 crc 
kubenswrapper[4881]: I1211 08:22:28.931546 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-b52lg\" (UID: \"cc8120de-b56f-481f-9ac5-19235df13216\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg" Dec 11 08:22:28 crc kubenswrapper[4881]: E1211 08:22:28.931708 4881 secret.go:188] Couldn't get secret openshift-monitoring/prometheus-operator-admission-webhook-tls: secret "prometheus-operator-admission-webhook-tls" not found Dec 11 08:22:28 crc kubenswrapper[4881]: E1211 08:22:28.931762 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates podName:cc8120de-b56f-481f-9ac5-19235df13216 nodeName:}" failed. No retries permitted until 2025-12-11 08:22:32.931747974 +0000 UTC m=+401.309116671 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "tls-certificates" (UniqueName: "kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates") pod "prometheus-operator-admission-webhook-f54c54754-b52lg" (UID: "cc8120de-b56f-481f-9ac5-19235df13216") : secret "prometheus-operator-admission-webhook-tls" not found Dec 11 08:22:29 crc kubenswrapper[4881]: I1211 08:22:29.011964 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08540012-842b-42a3-983b-231d761c3a80" path="/var/lib/kubelet/pods/08540012-842b-42a3-983b-231d761c3a80/volumes" Dec 11 08:22:29 crc kubenswrapper[4881]: I1211 08:22:29.396719 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:22:29 crc kubenswrapper[4881]: I1211 08:22:29.396800 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:22:30 crc kubenswrapper[4881]: I1211 08:22:30.055526 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:22:30 crc kubenswrapper[4881]: I1211 08:22:30.055836 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:22:30 crc kubenswrapper[4881]: I1211 08:22:30.056603 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: 
\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:22:30 crc kubenswrapper[4881]: I1211 08:22:30.063839 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:22:30 crc kubenswrapper[4881]: I1211 08:22:30.105567 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 08:22:30 crc kubenswrapper[4881]: W1211 08:22:30.570721 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-da9ebab79e62efb7b9cdd29d1104fd3321d45bdd7280ab4bda2b8b8d71267d23 WatchSource:0}: Error finding container da9ebab79e62efb7b9cdd29d1104fd3321d45bdd7280ab4bda2b8b8d71267d23: Status 404 returned error can't find the container with id da9ebab79e62efb7b9cdd29d1104fd3321d45bdd7280ab4bda2b8b8d71267d23 Dec 11 08:22:30 crc kubenswrapper[4881]: I1211 08:22:30.719147 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"da9ebab79e62efb7b9cdd29d1104fd3321d45bdd7280ab4bda2b8b8d71267d23"} Dec 11 08:22:31 crc kubenswrapper[4881]: I1211 08:22:31.727141 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"42fa8e50316ab35b223160c84f99ece46d711218a7a63f5cbb09042bc0eb606f"} Dec 11 08:22:32 crc kubenswrapper[4881]: I1211 08:22:32.998216 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-b52lg\" (UID: \"cc8120de-b56f-481f-9ac5-19235df13216\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg" Dec 11 08:22:32 crc kubenswrapper[4881]: E1211 08:22:32.998460 4881 secret.go:188] Couldn't get secret openshift-monitoring/prometheus-operator-admission-webhook-tls: secret "prometheus-operator-admission-webhook-tls" not found Dec 11 08:22:32 crc kubenswrapper[4881]: E1211 08:22:32.998786 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates podName:cc8120de-b56f-481f-9ac5-19235df13216 nodeName:}" failed. No retries permitted until 2025-12-11 08:22:40.998758308 +0000 UTC m=+409.376127015 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "tls-certificates" (UniqueName: "kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates") pod "prometheus-operator-admission-webhook-f54c54754-b52lg" (UID: "cc8120de-b56f-481f-9ac5-19235df13216") : secret "prometheus-operator-admission-webhook-tls" not found Dec 11 08:22:38 crc kubenswrapper[4881]: I1211 08:22:38.588727 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-67h8h" Dec 11 08:22:38 crc kubenswrapper[4881]: I1211 08:22:38.638111 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zn9v8"] Dec 11 08:22:41 crc kubenswrapper[4881]: I1211 08:22:41.012018 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-b52lg\" (UID: \"cc8120de-b56f-481f-9ac5-19235df13216\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg" Dec 11 08:22:41 crc kubenswrapper[4881]: E1211 08:22:41.012207 4881 secret.go:188] Couldn't get secret openshift-monitoring/prometheus-operator-admission-webhook-tls: secret "prometheus-operator-admission-webhook-tls" not found Dec 11 08:22:41 crc kubenswrapper[4881]: E1211 08:22:41.012532 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates podName:cc8120de-b56f-481f-9ac5-19235df13216 nodeName:}" failed. No retries permitted until 2025-12-11 08:22:57.012502828 +0000 UTC m=+425.389871525 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "tls-certificates" (UniqueName: "kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates") pod "prometheus-operator-admission-webhook-f54c54754-b52lg" (UID: "cc8120de-b56f-481f-9ac5-19235df13216") : secret "prometheus-operator-admission-webhook-tls" not found Dec 11 08:22:46 crc kubenswrapper[4881]: I1211 08:22:46.103775 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x"] Dec 11 08:22:46 crc kubenswrapper[4881]: I1211 08:22:46.104250 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" podUID="5dcb65e3-74f7-437f-b581-31b1b65af163" containerName="route-controller-manager" containerID="cri-o://506c6d28b935d934cec5a02fec1ef31376d51c4e1f423fa4cbef82200f1dbeff" gracePeriod=30 Dec 11 08:22:46 crc kubenswrapper[4881]: I1211 08:22:46.809689 4881 generic.go:334] "Generic (PLEG): container finished" podID="5dcb65e3-74f7-437f-b581-31b1b65af163" containerID="506c6d28b935d934cec5a02fec1ef31376d51c4e1f423fa4cbef82200f1dbeff" exitCode=0 Dec 11 08:22:46 crc kubenswrapper[4881]: I1211 08:22:46.809960 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" event={"ID":"5dcb65e3-74f7-437f-b581-31b1b65af163","Type":"ContainerDied","Data":"506c6d28b935d934cec5a02fec1ef31376d51c4e1f423fa4cbef82200f1dbeff"} Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.107633 4881 util.go:48] "No ready sandbox for pod can be found. 
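The tls-certificates mount for the webhook pod keeps failing because the secret does not exist yet, and the kubelet retries on a doubling schedule: durationBeforeRetry grows 500ms, 1s, 2s, 4s, 8s, 16s across the attempts logged between 08:22:25 and 08:22:41, and the mount finally succeeds at 08:22:57 once the secret is available. A minimal Go sketch of that doubling pattern; this is an illustration, not the kubelet's nestedpendingoperations implementation, and the 2m2s cap is an assumption since the log only shows the first six delays:

// Sketch only: reproduce the durationBeforeRetry sequence seen in the
// MountVolume.SetUp failures above (500ms -> 1s -> 2s -> 4s -> 8s -> 16s).
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond
	const maxDelay = 2*time.Minute + 2*time.Second // assumed cap, not shown in the log

	for attempt := 1; attempt <= 6; attempt++ {
		fmt.Printf("attempt %d failed, durationBeforeRetry %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}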
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.137256 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl"] Dec 11 08:22:47 crc kubenswrapper[4881]: E1211 08:22:47.137529 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dcb65e3-74f7-437f-b581-31b1b65af163" containerName="route-controller-manager" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.137549 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dcb65e3-74f7-437f-b581-31b1b65af163" containerName="route-controller-manager" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.137673 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="5dcb65e3-74f7-437f-b581-31b1b65af163" containerName="route-controller-manager" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.138125 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.150967 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl"] Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.195361 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5dcb65e3-74f7-437f-b581-31b1b65af163-serving-cert\") pod \"5dcb65e3-74f7-437f-b581-31b1b65af163\" (UID: \"5dcb65e3-74f7-437f-b581-31b1b65af163\") " Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.195422 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d7tn5\" (UniqueName: \"kubernetes.io/projected/5dcb65e3-74f7-437f-b581-31b1b65af163-kube-api-access-d7tn5\") pod \"5dcb65e3-74f7-437f-b581-31b1b65af163\" (UID: \"5dcb65e3-74f7-437f-b581-31b1b65af163\") " Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.195498 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5dcb65e3-74f7-437f-b581-31b1b65af163-config\") pod \"5dcb65e3-74f7-437f-b581-31b1b65af163\" (UID: \"5dcb65e3-74f7-437f-b581-31b1b65af163\") " Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.195608 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5dcb65e3-74f7-437f-b581-31b1b65af163-client-ca\") pod \"5dcb65e3-74f7-437f-b581-31b1b65af163\" (UID: \"5dcb65e3-74f7-437f-b581-31b1b65af163\") " Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.195831 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f318c6eb-08d3-4d22-9f89-d57404e0a8ad-serving-cert\") pod \"route-controller-manager-95479fb4c-24hkl\" (UID: \"f318c6eb-08d3-4d22-9f89-d57404e0a8ad\") " pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.195956 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f318c6eb-08d3-4d22-9f89-d57404e0a8ad-client-ca\") pod \"route-controller-manager-95479fb4c-24hkl\" (UID: 
\"f318c6eb-08d3-4d22-9f89-d57404e0a8ad\") " pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.196004 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gd7nv\" (UniqueName: \"kubernetes.io/projected/f318c6eb-08d3-4d22-9f89-d57404e0a8ad-kube-api-access-gd7nv\") pod \"route-controller-manager-95479fb4c-24hkl\" (UID: \"f318c6eb-08d3-4d22-9f89-d57404e0a8ad\") " pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.196049 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f318c6eb-08d3-4d22-9f89-d57404e0a8ad-config\") pod \"route-controller-manager-95479fb4c-24hkl\" (UID: \"f318c6eb-08d3-4d22-9f89-d57404e0a8ad\") " pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.196276 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5dcb65e3-74f7-437f-b581-31b1b65af163-config" (OuterVolumeSpecName: "config") pod "5dcb65e3-74f7-437f-b581-31b1b65af163" (UID: "5dcb65e3-74f7-437f-b581-31b1b65af163"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.196292 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5dcb65e3-74f7-437f-b581-31b1b65af163-client-ca" (OuterVolumeSpecName: "client-ca") pod "5dcb65e3-74f7-437f-b581-31b1b65af163" (UID: "5dcb65e3-74f7-437f-b581-31b1b65af163"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.200767 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5dcb65e3-74f7-437f-b581-31b1b65af163-kube-api-access-d7tn5" (OuterVolumeSpecName: "kube-api-access-d7tn5") pod "5dcb65e3-74f7-437f-b581-31b1b65af163" (UID: "5dcb65e3-74f7-437f-b581-31b1b65af163"). InnerVolumeSpecName "kube-api-access-d7tn5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.203578 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dcb65e3-74f7-437f-b581-31b1b65af163-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5dcb65e3-74f7-437f-b581-31b1b65af163" (UID: "5dcb65e3-74f7-437f-b581-31b1b65af163"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.297101 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f318c6eb-08d3-4d22-9f89-d57404e0a8ad-serving-cert\") pod \"route-controller-manager-95479fb4c-24hkl\" (UID: \"f318c6eb-08d3-4d22-9f89-d57404e0a8ad\") " pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.297210 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f318c6eb-08d3-4d22-9f89-d57404e0a8ad-client-ca\") pod \"route-controller-manager-95479fb4c-24hkl\" (UID: \"f318c6eb-08d3-4d22-9f89-d57404e0a8ad\") " pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.297252 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gd7nv\" (UniqueName: \"kubernetes.io/projected/f318c6eb-08d3-4d22-9f89-d57404e0a8ad-kube-api-access-gd7nv\") pod \"route-controller-manager-95479fb4c-24hkl\" (UID: \"f318c6eb-08d3-4d22-9f89-d57404e0a8ad\") " pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.297282 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f318c6eb-08d3-4d22-9f89-d57404e0a8ad-config\") pod \"route-controller-manager-95479fb4c-24hkl\" (UID: \"f318c6eb-08d3-4d22-9f89-d57404e0a8ad\") " pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.297326 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5dcb65e3-74f7-437f-b581-31b1b65af163-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.297367 4881 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5dcb65e3-74f7-437f-b581-31b1b65af163-client-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.297377 4881 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5dcb65e3-74f7-437f-b581-31b1b65af163-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.297385 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d7tn5\" (UniqueName: \"kubernetes.io/projected/5dcb65e3-74f7-437f-b581-31b1b65af163-kube-api-access-d7tn5\") on node \"crc\" DevicePath \"\"" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.298526 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f318c6eb-08d3-4d22-9f89-d57404e0a8ad-config\") pod \"route-controller-manager-95479fb4c-24hkl\" (UID: \"f318c6eb-08d3-4d22-9f89-d57404e0a8ad\") " pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.298742 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f318c6eb-08d3-4d22-9f89-d57404e0a8ad-client-ca\") pod \"route-controller-manager-95479fb4c-24hkl\" (UID: 
\"f318c6eb-08d3-4d22-9f89-d57404e0a8ad\") " pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.302007 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f318c6eb-08d3-4d22-9f89-d57404e0a8ad-serving-cert\") pod \"route-controller-manager-95479fb4c-24hkl\" (UID: \"f318c6eb-08d3-4d22-9f89-d57404e0a8ad\") " pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.316766 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gd7nv\" (UniqueName: \"kubernetes.io/projected/f318c6eb-08d3-4d22-9f89-d57404e0a8ad-kube-api-access-gd7nv\") pod \"route-controller-manager-95479fb4c-24hkl\" (UID: \"f318c6eb-08d3-4d22-9f89-d57404e0a8ad\") " pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.454129 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.817951 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" event={"ID":"5dcb65e3-74f7-437f-b581-31b1b65af163","Type":"ContainerDied","Data":"4ff5162b26edbc5d66ab678acf75809fcc509879e0f8354a50d9c5f47a0b40e6"} Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.818022 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.818225 4881 scope.go:117] "RemoveContainer" containerID="506c6d28b935d934cec5a02fec1ef31376d51c4e1f423fa4cbef82200f1dbeff" Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.844800 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x"] Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.848532 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69c7cb958d-ddc8x"] Dec 11 08:22:47 crc kubenswrapper[4881]: I1211 08:22:47.899247 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl"] Dec 11 08:22:47 crc kubenswrapper[4881]: W1211 08:22:47.903868 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf318c6eb_08d3_4d22_9f89_d57404e0a8ad.slice/crio-018b2ea407a8d11468f80bbca328d0dcc158357c528daaf878c51bfd8da2a8fc WatchSource:0}: Error finding container 018b2ea407a8d11468f80bbca328d0dcc158357c528daaf878c51bfd8da2a8fc: Status 404 returned error can't find the container with id 018b2ea407a8d11468f80bbca328d0dcc158357c528daaf878c51bfd8da2a8fc Dec 11 08:22:48 crc kubenswrapper[4881]: I1211 08:22:48.826179 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" event={"ID":"f318c6eb-08d3-4d22-9f89-d57404e0a8ad","Type":"ContainerStarted","Data":"a66708198b06c597b190c7aac3046531a8e5b992b32e8dadf6e8b99294becc0d"} Dec 11 08:22:48 crc kubenswrapper[4881]: I1211 08:22:48.827700 4881 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" event={"ID":"f318c6eb-08d3-4d22-9f89-d57404e0a8ad","Type":"ContainerStarted","Data":"018b2ea407a8d11468f80bbca328d0dcc158357c528daaf878c51bfd8da2a8fc"} Dec 11 08:22:48 crc kubenswrapper[4881]: I1211 08:22:48.827787 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" Dec 11 08:22:48 crc kubenswrapper[4881]: I1211 08:22:48.840854 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" Dec 11 08:22:48 crc kubenswrapper[4881]: I1211 08:22:48.853285 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" podStartSLOduration=2.853262492 podStartE2EDuration="2.853262492s" podCreationTimestamp="2025-12-11 08:22:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:22:48.853014646 +0000 UTC m=+417.230383363" watchObservedRunningTime="2025-12-11 08:22:48.853262492 +0000 UTC m=+417.230631229" Dec 11 08:22:49 crc kubenswrapper[4881]: I1211 08:22:49.013602 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5dcb65e3-74f7-437f-b581-31b1b65af163" path="/var/lib/kubelet/pods/5dcb65e3-74f7-437f-b581-31b1b65af163/volumes" Dec 11 08:22:57 crc kubenswrapper[4881]: I1211 08:22:57.084540 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-b52lg\" (UID: \"cc8120de-b56f-481f-9ac5-19235df13216\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg" Dec 11 08:22:57 crc kubenswrapper[4881]: I1211 08:22:57.095203 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/cc8120de-b56f-481f-9ac5-19235df13216-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-b52lg\" (UID: \"cc8120de-b56f-481f-9ac5-19235df13216\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg" Dec 11 08:22:57 crc kubenswrapper[4881]: I1211 08:22:57.306991 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-dockercfg-mkjwq" Dec 11 08:22:57 crc kubenswrapper[4881]: I1211 08:22:57.315252 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg" Dec 11 08:22:57 crc kubenswrapper[4881]: I1211 08:22:57.747930 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg"] Dec 11 08:22:57 crc kubenswrapper[4881]: W1211 08:22:57.755652 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcc8120de_b56f_481f_9ac5_19235df13216.slice/crio-29c63ded0a519ae0dead40a165a419c9a71940881bcc92465568dadc7c5d2ba2 WatchSource:0}: Error finding container 29c63ded0a519ae0dead40a165a419c9a71940881bcc92465568dadc7c5d2ba2: Status 404 returned error can't find the container with id 29c63ded0a519ae0dead40a165a419c9a71940881bcc92465568dadc7c5d2ba2 Dec 11 08:22:57 crc kubenswrapper[4881]: I1211 08:22:57.891143 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg" event={"ID":"cc8120de-b56f-481f-9ac5-19235df13216","Type":"ContainerStarted","Data":"29c63ded0a519ae0dead40a165a419c9a71940881bcc92465568dadc7c5d2ba2"} Dec 11 08:22:59 crc kubenswrapper[4881]: I1211 08:22:59.397197 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:22:59 crc kubenswrapper[4881]: I1211 08:22:59.397363 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:22:59 crc kubenswrapper[4881]: I1211 08:22:59.397435 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:22:59 crc kubenswrapper[4881]: I1211 08:22:59.398280 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d7ca78f14154a7019c587a5f29a4086b385a480f4f657464057274ffa0a054ec"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 08:22:59 crc kubenswrapper[4881]: I1211 08:22:59.398372 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://d7ca78f14154a7019c587a5f29a4086b385a480f4f657464057274ffa0a054ec" gracePeriod=600 Dec 11 08:22:59 crc kubenswrapper[4881]: I1211 08:22:59.905660 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="d7ca78f14154a7019c587a5f29a4086b385a480f4f657464057274ffa0a054ec" exitCode=0 Dec 11 08:22:59 crc kubenswrapper[4881]: I1211 08:22:59.905762 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" 
event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"d7ca78f14154a7019c587a5f29a4086b385a480f4f657464057274ffa0a054ec"} Dec 11 08:22:59 crc kubenswrapper[4881]: I1211 08:22:59.906178 4881 scope.go:117] "RemoveContainer" containerID="b0321657968185975569a4bf8037ab956988b13493feec0be9c439fbe6c20707" Dec 11 08:23:00 crc kubenswrapper[4881]: I1211 08:23:00.915045 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"2378020365a7ffc5afa00424ace5b73c56a13d69da3d6d17d8336f551688833d"} Dec 11 08:23:00 crc kubenswrapper[4881]: I1211 08:23:00.916416 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg" event={"ID":"cc8120de-b56f-481f-9ac5-19235df13216","Type":"ContainerStarted","Data":"5bd37d816b9cc3ff28a23ffb1042d6288870cf28e50115d1d9ad6d9c31eda935"} Dec 11 08:23:00 crc kubenswrapper[4881]: I1211 08:23:00.916624 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg" Dec 11 08:23:00 crc kubenswrapper[4881]: I1211 08:23:00.924632 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg" Dec 11 08:23:00 crc kubenswrapper[4881]: I1211 08:23:00.946983 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg" podStartSLOduration=33.263938275 podStartE2EDuration="35.946964422s" podCreationTimestamp="2025-12-11 08:22:25 +0000 UTC" firstStartedPulling="2025-12-11 08:22:57.758376277 +0000 UTC m=+426.135744974" lastFinishedPulling="2025-12-11 08:23:00.441402414 +0000 UTC m=+428.818771121" observedRunningTime="2025-12-11 08:23:00.945154958 +0000 UTC m=+429.322523725" watchObservedRunningTime="2025-12-11 08:23:00.946964422 +0000 UTC m=+429.324333129" Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.280879 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-hg5fq"] Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.282447 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.285100 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-kube-rbac-proxy-config" Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.285293 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-dockercfg-8rm4q" Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.285431 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-tls" Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.285916 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-client-ca" Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.304428 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-hg5fq"] Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.451958 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwpgw\" (UniqueName: \"kubernetes.io/projected/7d55a9c7-2259-4cdd-9789-7c2e0f881274-kube-api-access-rwpgw\") pod \"prometheus-operator-db54df47d-hg5fq\" (UID: \"7d55a9c7-2259-4cdd-9789-7c2e0f881274\") " pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.452107 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/7d55a9c7-2259-4cdd-9789-7c2e0f881274-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-hg5fq\" (UID: \"7d55a9c7-2259-4cdd-9789-7c2e0f881274\") " pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.452248 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/7d55a9c7-2259-4cdd-9789-7c2e0f881274-metrics-client-ca\") pod \"prometheus-operator-db54df47d-hg5fq\" (UID: \"7d55a9c7-2259-4cdd-9789-7c2e0f881274\") " pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.452411 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/7d55a9c7-2259-4cdd-9789-7c2e0f881274-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-hg5fq\" (UID: \"7d55a9c7-2259-4cdd-9789-7c2e0f881274\") " pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.554140 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/7d55a9c7-2259-4cdd-9789-7c2e0f881274-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-hg5fq\" (UID: \"7d55a9c7-2259-4cdd-9789-7c2e0f881274\") " pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.554215 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwpgw\" (UniqueName: 
\"kubernetes.io/projected/7d55a9c7-2259-4cdd-9789-7c2e0f881274-kube-api-access-rwpgw\") pod \"prometheus-operator-db54df47d-hg5fq\" (UID: \"7d55a9c7-2259-4cdd-9789-7c2e0f881274\") " pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.554237 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/7d55a9c7-2259-4cdd-9789-7c2e0f881274-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-hg5fq\" (UID: \"7d55a9c7-2259-4cdd-9789-7c2e0f881274\") " pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.554271 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/7d55a9c7-2259-4cdd-9789-7c2e0f881274-metrics-client-ca\") pod \"prometheus-operator-db54df47d-hg5fq\" (UID: \"7d55a9c7-2259-4cdd-9789-7c2e0f881274\") " pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" Dec 11 08:23:01 crc kubenswrapper[4881]: E1211 08:23:01.554422 4881 secret.go:188] Couldn't get secret openshift-monitoring/prometheus-operator-tls: secret "prometheus-operator-tls" not found Dec 11 08:23:01 crc kubenswrapper[4881]: E1211 08:23:01.554496 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d55a9c7-2259-4cdd-9789-7c2e0f881274-prometheus-operator-tls podName:7d55a9c7-2259-4cdd-9789-7c2e0f881274 nodeName:}" failed. No retries permitted until 2025-12-11 08:23:02.05447443 +0000 UTC m=+430.431843127 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "prometheus-operator-tls" (UniqueName: "kubernetes.io/secret/7d55a9c7-2259-4cdd-9789-7c2e0f881274-prometheus-operator-tls") pod "prometheus-operator-db54df47d-hg5fq" (UID: "7d55a9c7-2259-4cdd-9789-7c2e0f881274") : secret "prometheus-operator-tls" not found Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.555215 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/7d55a9c7-2259-4cdd-9789-7c2e0f881274-metrics-client-ca\") pod \"prometheus-operator-db54df47d-hg5fq\" (UID: \"7d55a9c7-2259-4cdd-9789-7c2e0f881274\") " pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.562799 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/7d55a9c7-2259-4cdd-9789-7c2e0f881274-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-hg5fq\" (UID: \"7d55a9c7-2259-4cdd-9789-7c2e0f881274\") " pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" Dec 11 08:23:01 crc kubenswrapper[4881]: I1211 08:23:01.571276 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwpgw\" (UniqueName: \"kubernetes.io/projected/7d55a9c7-2259-4cdd-9789-7c2e0f881274-kube-api-access-rwpgw\") pod \"prometheus-operator-db54df47d-hg5fq\" (UID: \"7d55a9c7-2259-4cdd-9789-7c2e0f881274\") " pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" Dec 11 08:23:02 crc kubenswrapper[4881]: I1211 08:23:02.061672 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/7d55a9c7-2259-4cdd-9789-7c2e0f881274-prometheus-operator-tls\") pod 
\"prometheus-operator-db54df47d-hg5fq\" (UID: \"7d55a9c7-2259-4cdd-9789-7c2e0f881274\") " pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" Dec 11 08:23:02 crc kubenswrapper[4881]: I1211 08:23:02.072624 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/7d55a9c7-2259-4cdd-9789-7c2e0f881274-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-hg5fq\" (UID: \"7d55a9c7-2259-4cdd-9789-7c2e0f881274\") " pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" Dec 11 08:23:02 crc kubenswrapper[4881]: I1211 08:23:02.201832 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" Dec 11 08:23:02 crc kubenswrapper[4881]: I1211 08:23:02.629198 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-hg5fq"] Dec 11 08:23:02 crc kubenswrapper[4881]: W1211 08:23:02.640418 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7d55a9c7_2259_4cdd_9789_7c2e0f881274.slice/crio-264ed18621232374d402a5788243f485b2221d03eec7e4a1a329d49bc5b3018b WatchSource:0}: Error finding container 264ed18621232374d402a5788243f485b2221d03eec7e4a1a329d49bc5b3018b: Status 404 returned error can't find the container with id 264ed18621232374d402a5788243f485b2221d03eec7e4a1a329d49bc5b3018b Dec 11 08:23:02 crc kubenswrapper[4881]: I1211 08:23:02.927561 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" event={"ID":"7d55a9c7-2259-4cdd-9789-7c2e0f881274","Type":"ContainerStarted","Data":"264ed18621232374d402a5788243f485b2221d03eec7e4a1a329d49bc5b3018b"} Dec 11 08:23:03 crc kubenswrapper[4881]: I1211 08:23:03.690666 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" podUID="ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1" containerName="registry" containerID="cri-o://78424e21b354c54529a5a4bc83c45a2b99b3d25d3e44cf7f4d5aca40f1d9ef67" gracePeriod=30 Dec 11 08:23:03 crc kubenswrapper[4881]: I1211 08:23:03.946096 4881 generic.go:334] "Generic (PLEG): container finished" podID="ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1" containerID="78424e21b354c54529a5a4bc83c45a2b99b3d25d3e44cf7f4d5aca40f1d9ef67" exitCode=0 Dec 11 08:23:03 crc kubenswrapper[4881]: I1211 08:23:03.946590 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" event={"ID":"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1","Type":"ContainerDied","Data":"78424e21b354c54529a5a4bc83c45a2b99b3d25d3e44cf7f4d5aca40f1d9ef67"} Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.209156 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.293530 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-installation-pull-secrets\") pod \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.293617 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t6j22\" (UniqueName: \"kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-kube-api-access-t6j22\") pod \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.293652 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-registry-certificates\") pod \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.293707 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-registry-tls\") pod \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.293779 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-trusted-ca\") pod \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.293812 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-ca-trust-extracted\") pod \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.293947 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.294014 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-bound-sa-token\") pod \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\" (UID: \"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1\") " Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.294609 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.294903 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.300082 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.300148 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.305792 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-kube-api-access-t6j22" (OuterVolumeSpecName: "kube-api-access-t6j22") pod "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1"). InnerVolumeSpecName "kube-api-access-t6j22". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.311805 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.314081 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.315367 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1" (UID: "ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.395740 4881 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-registry-certificates\") on node \"crc\" DevicePath \"\"" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.396068 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t6j22\" (UniqueName: \"kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-kube-api-access-t6j22\") on node \"crc\" DevicePath \"\"" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.396084 4881 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-registry-tls\") on node \"crc\" DevicePath \"\"" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.396098 4881 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.396112 4881 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.396124 4881 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.396136 4881 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.953419 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" event={"ID":"ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1","Type":"ContainerDied","Data":"728ddb536a6009db0b6069c9feb86db5e7215b44d6d3d18f091c85eda1891956"} Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.953471 4881 scope.go:117] "RemoveContainer" containerID="78424e21b354c54529a5a4bc83c45a2b99b3d25d3e44cf7f4d5aca40f1d9ef67" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.953489 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-zn9v8" Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.981143 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zn9v8"] Dec 11 08:23:04 crc kubenswrapper[4881]: I1211 08:23:04.985018 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zn9v8"] Dec 11 08:23:05 crc kubenswrapper[4881]: I1211 08:23:05.011755 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1" path="/var/lib/kubelet/pods/ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1/volumes" Dec 11 08:23:05 crc kubenswrapper[4881]: I1211 08:23:05.963204 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" event={"ID":"7d55a9c7-2259-4cdd-9789-7c2e0f881274","Type":"ContainerStarted","Data":"3d7f90082b40a7b4fda13abeb6b1a59e5525d1290ba6587236ef4cbc1458d54c"} Dec 11 08:23:05 crc kubenswrapper[4881]: I1211 08:23:05.963272 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" event={"ID":"7d55a9c7-2259-4cdd-9789-7c2e0f881274","Type":"ContainerStarted","Data":"e21e0f878d0b4661e531494ce2120314356e53fa0ae88b722afb8b6fc5147235"} Dec 11 08:23:05 crc kubenswrapper[4881]: I1211 08:23:05.982533 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-db54df47d-hg5fq" podStartSLOduration=2.557967697 podStartE2EDuration="4.982497615s" podCreationTimestamp="2025-12-11 08:23:01 +0000 UTC" firstStartedPulling="2025-12-11 08:23:02.642091578 +0000 UTC m=+431.019460275" lastFinishedPulling="2025-12-11 08:23:05.066621456 +0000 UTC m=+433.443990193" observedRunningTime="2025-12-11 08:23:05.982421903 +0000 UTC m=+434.359790680" watchObservedRunningTime="2025-12-11 08:23:05.982497615 +0000 UTC m=+434.359866352" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.614763 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz"] Dec 11 08:23:07 crc kubenswrapper[4881]: E1211 08:23:07.615067 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1" containerName="registry" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.615081 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1" containerName="registry" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.615178 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce5b2d83-b0de-46a2-9b7f-7688aa7ea0b1" containerName="registry" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.615998 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.617285 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-kube-rbac-proxy-config" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.617997 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm"] Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.618579 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-tls" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.618691 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-dockercfg-bbzpx" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.618986 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.621664 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-tls" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.621757 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-state-metrics-custom-resource-state-configmap" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.621801 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-kube-rbac-proxy-config" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.625998 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-dockercfg-xwkkn" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.637955 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm"] Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.641398 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/node-exporter-l9wps"] Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.642376 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.644165 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-kube-rbac-proxy-config" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.645302 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-tls" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.645906 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-dockercfg-qd6gb" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.646513 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz"] Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740059 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/d8e9ea88-1dc9-4254-870d-c590763cc538-node-exporter-textfile\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740131 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/ad6fe517-f4e6-4fdf-b981-54924015778d-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: \"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740165 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/d8e9ea88-1dc9-4254-870d-c590763cc538-node-exporter-tls\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740188 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smpn8\" (UniqueName: \"kubernetes.io/projected/d8e9ea88-1dc9-4254-870d-c590763cc538-kube-api-access-smpn8\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740212 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/0d9ffadb-6839-48af-b6a5-83263823a70e-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-6lbqz\" (UID: \"0d9ffadb-6839-48af-b6a5-83263823a70e\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740235 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/d8e9ea88-1dc9-4254-870d-c590763cc538-root\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740262 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: 
\"kubernetes.io/secret/ad6fe517-f4e6-4fdf-b981-54924015778d-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: \"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740386 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/d8e9ea88-1dc9-4254-870d-c590763cc538-metrics-client-ca\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740468 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/d8e9ea88-1dc9-4254-870d-c590763cc538-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740486 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/d8e9ea88-1dc9-4254-870d-c590763cc538-node-exporter-wtmp\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740517 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/ad6fe517-f4e6-4fdf-b981-54924015778d-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: \"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740549 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d8e9ea88-1dc9-4254-870d-c590763cc538-sys\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740585 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/ad6fe517-f4e6-4fdf-b981-54924015778d-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: \"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740603 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/0d9ffadb-6839-48af-b6a5-83263823a70e-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-6lbqz\" (UID: \"0d9ffadb-6839-48af-b6a5-83263823a70e\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740729 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: 
\"kubernetes.io/configmap/ad6fe517-f4e6-4fdf-b981-54924015778d-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: \"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740800 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzdfb\" (UniqueName: \"kubernetes.io/projected/0d9ffadb-6839-48af-b6a5-83263823a70e-kube-api-access-lzdfb\") pod \"openshift-state-metrics-566fddb674-6lbqz\" (UID: \"0d9ffadb-6839-48af-b6a5-83263823a70e\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740834 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/0d9ffadb-6839-48af-b6a5-83263823a70e-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-6lbqz\" (UID: \"0d9ffadb-6839-48af-b6a5-83263823a70e\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.740868 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kf8q4\" (UniqueName: \"kubernetes.io/projected/ad6fe517-f4e6-4fdf-b981-54924015778d-kube-api-access-kf8q4\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: \"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.841970 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/ad6fe517-f4e6-4fdf-b981-54924015778d-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: \"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.842485 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/d8e9ea88-1dc9-4254-870d-c590763cc538-metrics-client-ca\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.842568 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/d8e9ea88-1dc9-4254-870d-c590763cc538-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.842652 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/d8e9ea88-1dc9-4254-870d-c590763cc538-node-exporter-wtmp\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.842721 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: 
\"kubernetes.io/configmap/ad6fe517-f4e6-4fdf-b981-54924015778d-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: \"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.842812 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d8e9ea88-1dc9-4254-870d-c590763cc538-sys\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.842891 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/ad6fe517-f4e6-4fdf-b981-54924015778d-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: \"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.842951 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d8e9ea88-1dc9-4254-870d-c590763cc538-sys\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.842959 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/0d9ffadb-6839-48af-b6a5-83263823a70e-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-6lbqz\" (UID: \"0d9ffadb-6839-48af-b6a5-83263823a70e\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.843066 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/ad6fe517-f4e6-4fdf-b981-54924015778d-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: \"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.843152 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzdfb\" (UniqueName: \"kubernetes.io/projected/0d9ffadb-6839-48af-b6a5-83263823a70e-kube-api-access-lzdfb\") pod \"openshift-state-metrics-566fddb674-6lbqz\" (UID: \"0d9ffadb-6839-48af-b6a5-83263823a70e\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.843199 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/0d9ffadb-6839-48af-b6a5-83263823a70e-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-6lbqz\" (UID: \"0d9ffadb-6839-48af-b6a5-83263823a70e\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.843251 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kf8q4\" (UniqueName: \"kubernetes.io/projected/ad6fe517-f4e6-4fdf-b981-54924015778d-kube-api-access-kf8q4\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: 
\"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.843291 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/d8e9ea88-1dc9-4254-870d-c590763cc538-node-exporter-textfile\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.843327 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/ad6fe517-f4e6-4fdf-b981-54924015778d-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: \"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.843430 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/d8e9ea88-1dc9-4254-870d-c590763cc538-node-exporter-tls\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.843474 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smpn8\" (UniqueName: \"kubernetes.io/projected/d8e9ea88-1dc9-4254-870d-c590763cc538-kube-api-access-smpn8\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.843524 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/0d9ffadb-6839-48af-b6a5-83263823a70e-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-6lbqz\" (UID: \"0d9ffadb-6839-48af-b6a5-83263823a70e\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.843566 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/ad6fe517-f4e6-4fdf-b981-54924015778d-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: \"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.843597 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"root\" (UniqueName: \"kubernetes.io/host-path/d8e9ea88-1dc9-4254-870d-c590763cc538-root\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.843572 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/d8e9ea88-1dc9-4254-870d-c590763cc538-root\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.842901 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/d8e9ea88-1dc9-4254-870d-c590763cc538-node-exporter-wtmp\") pod 
\"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: E1211 08:23:07.843807 4881 secret.go:188] Couldn't get secret openshift-monitoring/kube-state-metrics-tls: secret "kube-state-metrics-tls" not found Dec 11 08:23:07 crc kubenswrapper[4881]: E1211 08:23:07.843870 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ad6fe517-f4e6-4fdf-b981-54924015778d-kube-state-metrics-tls podName:ad6fe517-f4e6-4fdf-b981-54924015778d nodeName:}" failed. No retries permitted until 2025-12-11 08:23:08.343849385 +0000 UTC m=+436.721218202 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-state-metrics-tls" (UniqueName: "kubernetes.io/secret/ad6fe517-f4e6-4fdf-b981-54924015778d-kube-state-metrics-tls") pod "kube-state-metrics-777cb5bd5d-598lm" (UID: "ad6fe517-f4e6-4fdf-b981-54924015778d") : secret "kube-state-metrics-tls" not found Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.843917 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/d8e9ea88-1dc9-4254-870d-c590763cc538-node-exporter-textfile\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.843924 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/ad6fe517-f4e6-4fdf-b981-54924015778d-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: \"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.845101 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/0d9ffadb-6839-48af-b6a5-83263823a70e-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-6lbqz\" (UID: \"0d9ffadb-6839-48af-b6a5-83263823a70e\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.845259 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/ad6fe517-f4e6-4fdf-b981-54924015778d-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: \"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.845865 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/d8e9ea88-1dc9-4254-870d-c590763cc538-metrics-client-ca\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps" Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.847674 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/0d9ffadb-6839-48af-b6a5-83263823a70e-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-6lbqz\" (UID: \"0d9ffadb-6839-48af-b6a5-83263823a70e\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz" Dec 11 08:23:07 crc 
Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.855230 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/0d9ffadb-6839-48af-b6a5-83263823a70e-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-6lbqz\" (UID: \"0d9ffadb-6839-48af-b6a5-83263823a70e\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz"
Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.856511 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/d8e9ea88-1dc9-4254-870d-c590763cc538-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps"
Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.857191 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/d8e9ea88-1dc9-4254-870d-c590763cc538-node-exporter-tls\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps"
Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.880170 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzdfb\" (UniqueName: \"kubernetes.io/projected/0d9ffadb-6839-48af-b6a5-83263823a70e-kube-api-access-lzdfb\") pod \"openshift-state-metrics-566fddb674-6lbqz\" (UID: \"0d9ffadb-6839-48af-b6a5-83263823a70e\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz"
Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.880384 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kf8q4\" (UniqueName: \"kubernetes.io/projected/ad6fe517-f4e6-4fdf-b981-54924015778d-kube-api-access-kf8q4\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: \"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm"
Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.882738 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smpn8\" (UniqueName: \"kubernetes.io/projected/d8e9ea88-1dc9-4254-870d-c590763cc538-kube-api-access-smpn8\") pod \"node-exporter-l9wps\" (UID: \"d8e9ea88-1dc9-4254-870d-c590763cc538\") " pod="openshift-monitoring/node-exporter-l9wps"
Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.933293 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz"
Dec 11 08:23:07 crc kubenswrapper[4881]: I1211 08:23:07.954617 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/node-exporter-l9wps"
Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.321276 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz"]
Dec 11 08:23:08 crc kubenswrapper[4881]: W1211 08:23:08.331706 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d9ffadb_6839_48af_b6a5_83263823a70e.slice/crio-de069cd63fe46696f5b50a64213a1052a1728410d4d7b14e2c17046f8cc6d3cf WatchSource:0}: Error finding container de069cd63fe46696f5b50a64213a1052a1728410d4d7b14e2c17046f8cc6d3cf: Status 404 returned error can't find the container with id de069cd63fe46696f5b50a64213a1052a1728410d4d7b14e2c17046f8cc6d3cf
Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.353023 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/ad6fe517-f4e6-4fdf-b981-54924015778d-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: \"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm"
Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.359556 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/ad6fe517-f4e6-4fdf-b981-54924015778d-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-598lm\" (UID: \"ad6fe517-f4e6-4fdf-b981-54924015778d\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm"
Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.543551 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm"
Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.738533 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/alertmanager-main-0"]
Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.740996 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/alertmanager-main-0"
Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.754390 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-web" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.754639 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.754768 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-web-config" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.754898 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-dockercfg-tns79" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.755037 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls-assets-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.755177 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-generated" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.755348 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-metric" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.755624 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.765738 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"alertmanager-trusted-ca-bundle" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.779095 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.859256 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/f2498125-8f34-4a97-92de-92085c448bf4-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.859298 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.859350 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/f2498125-8f34-4a97-92de-92085c448bf4-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.859383 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/f2498125-8f34-4a97-92de-92085c448bf4-config-out\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " 
pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.859411 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/f2498125-8f34-4a97-92de-92085c448bf4-tls-assets\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.859453 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.859675 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mklkn\" (UniqueName: \"kubernetes.io/projected/f2498125-8f34-4a97-92de-92085c448bf4-kube-api-access-mklkn\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.859783 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-web-config\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.859843 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-config-volume\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.859924 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.859950 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f2498125-8f34-4a97-92de-92085c448bf4-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.859992 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.960768 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-mklkn\" (UniqueName: \"kubernetes.io/projected/f2498125-8f34-4a97-92de-92085c448bf4-kube-api-access-mklkn\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.960828 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-web-config\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.960855 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-config-volume\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.960889 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.960909 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f2498125-8f34-4a97-92de-92085c448bf4-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.960934 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.960982 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/f2498125-8f34-4a97-92de-92085c448bf4-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.961005 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.961036 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/f2498125-8f34-4a97-92de-92085c448bf4-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.961064 4881 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/f2498125-8f34-4a97-92de-92085c448bf4-config-out\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.961089 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/f2498125-8f34-4a97-92de-92085c448bf4-tls-assets\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.961131 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.962120 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/f2498125-8f34-4a97-92de-92085c448bf4-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.962518 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f2498125-8f34-4a97-92de-92085c448bf4-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.963164 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/f2498125-8f34-4a97-92de-92085c448bf4-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.966987 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/f2498125-8f34-4a97-92de-92085c448bf4-config-out\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.967128 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.967613 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.968164 4881 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/f2498125-8f34-4a97-92de-92085c448bf4-tls-assets\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.972166 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.972321 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.972878 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-web-config\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.974402 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/f2498125-8f34-4a97-92de-92085c448bf4-config-volume\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.983372 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mklkn\" (UniqueName: \"kubernetes.io/projected/f2498125-8f34-4a97-92de-92085c448bf4-kube-api-access-mklkn\") pod \"alertmanager-main-0\" (UID: \"f2498125-8f34-4a97-92de-92085c448bf4\") " pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.984215 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz" event={"ID":"0d9ffadb-6839-48af-b6a5-83263823a70e","Type":"ContainerStarted","Data":"33eda45bd888c443826678ad4018c8904625e1657463bfd641c52cb676bb43c9"} Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.984264 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz" event={"ID":"0d9ffadb-6839-48af-b6a5-83263823a70e","Type":"ContainerStarted","Data":"d394ea3cc05aed7cb04f09331e44f2b5bc2f9ac90deb350339305613f6edf7e9"} Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.984281 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz" event={"ID":"0d9ffadb-6839-48af-b6a5-83263823a70e","Type":"ContainerStarted","Data":"de069cd63fe46696f5b50a64213a1052a1728410d4d7b14e2c17046f8cc6d3cf"} Dec 11 08:23:08 crc kubenswrapper[4881]: I1211 08:23:08.985468 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-l9wps" 
event={"ID":"d8e9ea88-1dc9-4254-870d-c590763cc538","Type":"ContainerStarted","Data":"5851ed1dae360845efa2214b01987922fb42f4efd5519e27c3292a21bbfd1a45"} Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.016213 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm"] Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.080161 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.663124 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Dec 11 08:23:09 crc kubenswrapper[4881]: W1211 08:23:09.678300 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf2498125_8f34_4a97_92de_92085c448bf4.slice/crio-57ad0ae51cd72da1adf914569ba13d305e5ffedf3fadd4b125f2f6c1b32c64b2 WatchSource:0}: Error finding container 57ad0ae51cd72da1adf914569ba13d305e5ffedf3fadd4b125f2f6c1b32c64b2: Status 404 returned error can't find the container with id 57ad0ae51cd72da1adf914569ba13d305e5ffedf3fadd4b125f2f6c1b32c64b2 Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.727867 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/thanos-querier-778bc6ff98-47qfw"] Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.729767 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.735758 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-metrics" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.735843 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.735971 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-tls" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.739651 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-grpc-tls-27sb8jdjp92gi" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.744777 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-web" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.745094 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-dockercfg-kswb5" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.745244 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-rules" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.770826 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-778bc6ff98-47qfw"] Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.875474 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-thanos-querier-tls\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:09 crc 
kubenswrapper[4881]: I1211 08:23:09.875579 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.875680 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.875730 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.875800 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-metrics-client-ca\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.875973 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.876078 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-grpc-tls\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.876129 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xg5c6\" (UniqueName: \"kubernetes.io/projected/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-kube-api-access-xg5c6\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.977203 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-grpc-tls\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " 
pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.977566 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xg5c6\" (UniqueName: \"kubernetes.io/projected/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-kube-api-access-xg5c6\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.977604 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-thanos-querier-tls\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.977634 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.977655 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.977676 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.977701 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-metrics-client-ca\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.977734 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.980238 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-metrics-client-ca\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" 
Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.982982 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw"
Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.983186 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-thanos-querier-tls\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw"
Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.983618 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw"
Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.985197 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw"
Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.987100 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw"
Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.991625 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"f2498125-8f34-4a97-92de-92085c448bf4","Type":"ContainerStarted","Data":"57ad0ae51cd72da1adf914569ba13d305e5ffedf3fadd4b125f2f6c1b32c64b2"}
Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.992402 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" event={"ID":"ad6fe517-f4e6-4fdf-b981-54924015778d","Type":"ContainerStarted","Data":"6133f8294bcedc95b19a3100567a53f94ea5abd032cd6eccdfc70132364c205b"}
Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.993704 4881 generic.go:334] "Generic (PLEG): container finished" podID="d8e9ea88-1dc9-4254-870d-c590763cc538" containerID="85937c0686742e9315b40a251993c1f1b40b7a4a10ad100de19eac485b4dfc06" exitCode=0
Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.993730 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-l9wps" event={"ID":"d8e9ea88-1dc9-4254-870d-c590763cc538","Type":"ContainerDied","Data":"85937c0686742e9315b40a251993c1f1b40b7a4a10ad100de19eac485b4dfc06"}
Dec 11 08:23:09 crc kubenswrapper[4881]: I1211 08:23:09.999894 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-secret-grpc-tls\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw"
Dec 11 08:23:10 crc kubenswrapper[4881]: I1211 08:23:10.001935 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xg5c6\" (UniqueName: \"kubernetes.io/projected/0dbf04dc-19a0-4694-bdc1-c5d81052efd3-kube-api-access-xg5c6\") pod \"thanos-querier-778bc6ff98-47qfw\" (UID: \"0dbf04dc-19a0-4694-bdc1-c5d81052efd3\") " pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw"
Dec 11 08:23:10 crc kubenswrapper[4881]: I1211 08:23:10.044247 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw"
Dec 11 08:23:10 crc kubenswrapper[4881]: I1211 08:23:10.496744 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-778bc6ff98-47qfw"]
Dec 11 08:23:10 crc kubenswrapper[4881]: W1211 08:23:10.913469 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0dbf04dc_19a0_4694_bdc1_c5d81052efd3.slice/crio-0cae7d97bb1d46785c683293a93326b9240c218640241314941394dac4a2ccd8 WatchSource:0}: Error finding container 0cae7d97bb1d46785c683293a93326b9240c218640241314941394dac4a2ccd8: Status 404 returned error can't find the container with id 0cae7d97bb1d46785c683293a93326b9240c218640241314941394dac4a2ccd8
Dec 11 08:23:11 crc kubenswrapper[4881]: I1211 08:23:11.002995 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-l9wps" event={"ID":"d8e9ea88-1dc9-4254-870d-c590763cc538","Type":"ContainerStarted","Data":"98b6756621ba79307e67438a9a9b584120616f03e23b11c8f1abdeff46575040"}
Dec 11 08:23:11 crc kubenswrapper[4881]: I1211 08:23:11.013391 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" event={"ID":"0dbf04dc-19a0-4694-bdc1-c5d81052efd3","Type":"ContainerStarted","Data":"0cae7d97bb1d46785c683293a93326b9240c218640241314941394dac4a2ccd8"}
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.016209 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz" event={"ID":"0d9ffadb-6839-48af-b6a5-83263823a70e","Type":"ContainerStarted","Data":"c3b908b65df9aeda82dddbe937835ae70601e639f74f10f374e0ff2bfd5ff452"}
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.021541 4881 generic.go:334] "Generic (PLEG): container finished" podID="f2498125-8f34-4a97-92de-92085c448bf4" containerID="b57cf5b2f5ecd9c04fd32c0f97c82abd596be59af0bb090392bbb6caeee77c58" exitCode=0
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.021676 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"f2498125-8f34-4a97-92de-92085c448bf4","Type":"ContainerDied","Data":"b57cf5b2f5ecd9c04fd32c0f97c82abd596be59af0bb090392bbb6caeee77c58"}
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.024858 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" event={"ID":"ad6fe517-f4e6-4fdf-b981-54924015778d","Type":"ContainerStarted","Data":"eb5bddabdee110b103da14a0e3a96aa1d4844f61c5d197915aebfe8fea79f597"}
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.024910 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" event={"ID":"ad6fe517-f4e6-4fdf-b981-54924015778d","Type":"ContainerStarted","Data":"aa5740a709b9a4e8504f47f08dd51aeceaeddb32b05a3784454c6a47fa5b740f"}
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.024926 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" event={"ID":"ad6fe517-f4e6-4fdf-b981-54924015778d","Type":"ContainerStarted","Data":"5a2d93a3500dcb0bb096571fe247c14433011e746cd71e5416dc27c8f3de9dd1"}
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.028997 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-l9wps" event={"ID":"d8e9ea88-1dc9-4254-870d-c590763cc538","Type":"ContainerStarted","Data":"a6523bd92579b5f0359f3a6fe2e597a71bdb3aab91bb3e77afd787bc058263b3"}
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.041911 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/openshift-state-metrics-566fddb674-6lbqz" podStartSLOduration=2.745978735 podStartE2EDuration="5.041888685s" podCreationTimestamp="2025-12-11 08:23:07 +0000 UTC" firstStartedPulling="2025-12-11 08:23:08.650695772 +0000 UTC m=+437.028064459" lastFinishedPulling="2025-12-11 08:23:10.946605702 +0000 UTC m=+439.323974409" observedRunningTime="2025-12-11 08:23:12.039596139 +0000 UTC m=+440.416964846" watchObservedRunningTime="2025-12-11 08:23:12.041888685 +0000 UTC m=+440.419257382"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.058665 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/node-exporter-l9wps" podStartSLOduration=3.741029591 podStartE2EDuration="5.058646223s" podCreationTimestamp="2025-12-11 08:23:07 +0000 UTC" firstStartedPulling="2025-12-11 08:23:07.980382046 +0000 UTC m=+436.357750733" lastFinishedPulling="2025-12-11 08:23:09.297998668 +0000 UTC m=+437.675367365" observedRunningTime="2025-12-11 08:23:12.057492495 +0000 UTC m=+440.434861192" watchObservedRunningTime="2025-12-11 08:23:12.058646223 +0000 UTC m=+440.436014920"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.083026 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-598lm" podStartSLOduration=3.163514162 podStartE2EDuration="5.083005805s" podCreationTimestamp="2025-12-11 08:23:07 +0000 UTC" firstStartedPulling="2025-12-11 08:23:09.030560312 +0000 UTC m=+437.407928999" lastFinishedPulling="2025-12-11 08:23:10.950051935 +0000 UTC m=+439.327420642" observedRunningTime="2025-12-11 08:23:12.077410499 +0000 UTC m=+440.454779216" watchObservedRunningTime="2025-12-11 08:23:12.083005805 +0000 UTC m=+440.460374502"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.435436 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7dc5585756-89sps"]
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.436606 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.459349 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7dc5585756-89sps"]
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.621759 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-trusted-ca-bundle\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.621819 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-serving-cert\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.621839 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-oauth-serving-cert\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.621956 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-service-ca\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.622004 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5gpq\" (UniqueName: \"kubernetes.io/projected/d3604568-9c08-4efc-a18d-72bdfb6f4763-kube-api-access-q5gpq\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.622033 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-oauth-config\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.622085 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-config\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.723209 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-service-ca\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.723508 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5gpq\" (UniqueName: \"kubernetes.io/projected/d3604568-9c08-4efc-a18d-72bdfb6f4763-kube-api-access-q5gpq\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.723562 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-oauth-config\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.723603 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-config\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.723625 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-trusted-ca-bundle\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.723649 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-serving-cert\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.723662 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-oauth-serving-cert\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.725559 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-oauth-serving-cert\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.725744 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-config\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.725945 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-trusted-ca-bundle\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.726427 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-service-ca\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.728787 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-oauth-config\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.731955 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-serving-cert\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.739848 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5gpq\" (UniqueName: \"kubernetes.io/projected/d3604568-9c08-4efc-a18d-72bdfb6f4763-kube-api-access-q5gpq\") pod \"console-7dc5585756-89sps\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.766264 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7dc5585756-89sps"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.913421 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/metrics-server-567ff9c44d-zvlcx"]
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.914284 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.917589 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-dockercfg-xj6dm"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.918384 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-client-certs"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.919660 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-tls"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.927143 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-2sqavu8d38aea"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.927988 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kubelet-serving-ca-bundle"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.930315 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-server-audit-profiles"
Dec 11 08:23:12 crc kubenswrapper[4881]: I1211 08:23:12.933025 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-567ff9c44d-zvlcx"]
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.028938 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/e33cf336-f1a0-4217-a606-c12ebf877533-secret-metrics-server-tls\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.028980 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/e33cf336-f1a0-4217-a606-c12ebf877533-secret-metrics-client-certs\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.029017 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cg6pw\" (UniqueName: \"kubernetes.io/projected/e33cf336-f1a0-4217-a606-c12ebf877533-kube-api-access-cg6pw\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.029040 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e33cf336-f1a0-4217-a606-c12ebf877533-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.029067 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/e33cf336-f1a0-4217-a606-c12ebf877533-audit-log\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.029084 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/e33cf336-f1a0-4217-a606-c12ebf877533-metrics-server-audit-profiles\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.029125 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e33cf336-f1a0-4217-a606-c12ebf877533-client-ca-bundle\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.130025 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e33cf336-f1a0-4217-a606-c12ebf877533-client-ca-bundle\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.131129 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/e33cf336-f1a0-4217-a606-c12ebf877533-secret-metrics-server-tls\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.131534 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/e33cf336-f1a0-4217-a606-c12ebf877533-secret-metrics-client-certs\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.131628 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cg6pw\" (UniqueName: \"kubernetes.io/projected/e33cf336-f1a0-4217-a606-c12ebf877533-kube-api-access-cg6pw\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.131681 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e33cf336-f1a0-4217-a606-c12ebf877533-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.131739 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/e33cf336-f1a0-4217-a606-c12ebf877533-audit-log\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.131800 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/e33cf336-f1a0-4217-a606-c12ebf877533-metrics-server-audit-profiles\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.132284 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/e33cf336-f1a0-4217-a606-c12ebf877533-audit-log\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.132992 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e33cf336-f1a0-4217-a606-c12ebf877533-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.133405 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/e33cf336-f1a0-4217-a606-c12ebf877533-metrics-server-audit-profiles\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.135741 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/e33cf336-f1a0-4217-a606-c12ebf877533-secret-metrics-server-tls\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.135886 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e33cf336-f1a0-4217-a606-c12ebf877533-client-ca-bundle\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.138034 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/e33cf336-f1a0-4217-a606-c12ebf877533-secret-metrics-client-certs\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.147114 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cg6pw\" (UniqueName: \"kubernetes.io/projected/e33cf336-f1a0-4217-a606-c12ebf877533-kube-api-access-cg6pw\") pod \"metrics-server-567ff9c44d-zvlcx\" (UID: \"e33cf336-f1a0-4217-a606-c12ebf877533\") " pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.188467 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7dc5585756-89sps"]
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.253421 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.420502 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/monitoring-plugin-5cb9d5856f-c4cvw"]
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.421289 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/monitoring-plugin-5cb9d5856f-c4cvw"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.423773 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"monitoring-plugin-cert"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.423891 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"default-dockercfg-6tstp"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.435568 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-5cb9d5856f-c4cvw"]
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.536848 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/1904b37a-2b5b-425b-b3cb-cad8c86efcff-monitoring-plugin-cert\") pod \"monitoring-plugin-5cb9d5856f-c4cvw\" (UID: \"1904b37a-2b5b-425b-b3cb-cad8c86efcff\") " pod="openshift-monitoring/monitoring-plugin-5cb9d5856f-c4cvw"
Dec 11 08:23:13 crc kubenswrapper[4881]: W1211 08:23:13.549906 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd3604568_9c08_4efc_a18d_72bdfb6f4763.slice/crio-05e0c7c884f85ac002f8554bba843b483e886604183ef6e9659d31703ee275ee WatchSource:0}: Error finding container 05e0c7c884f85ac002f8554bba843b483e886604183ef6e9659d31703ee275ee: Status 404 returned error can't find the container with id 05e0c7c884f85ac002f8554bba843b483e886604183ef6e9659d31703ee275ee
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.637887 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/1904b37a-2b5b-425b-b3cb-cad8c86efcff-monitoring-plugin-cert\") pod \"monitoring-plugin-5cb9d5856f-c4cvw\" (UID: \"1904b37a-2b5b-425b-b3cb-cad8c86efcff\") " pod="openshift-monitoring/monitoring-plugin-5cb9d5856f-c4cvw"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.640945 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/1904b37a-2b5b-425b-b3cb-cad8c86efcff-monitoring-plugin-cert\") pod \"monitoring-plugin-5cb9d5856f-c4cvw\" (UID: \"1904b37a-2b5b-425b-b3cb-cad8c86efcff\") " pod="openshift-monitoring/monitoring-plugin-5cb9d5856f-c4cvw"
Dec 11 08:23:13 crc kubenswrapper[4881]: I1211 08:23:13.740317 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/monitoring-plugin-5cb9d5856f-c4cvw"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.022432 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-k8s-0"]
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.024660 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.027978 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-prometheus-http-client-file"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.029303 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-rbac-proxy"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.030437 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-kube-rbac-proxy-web"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.030597 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-dockercfg-spns4"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.030748 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.031012 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-web-config"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.031151 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls-assets-0"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.031279 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-grpc-tls-6b7giufgt7g37"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.031422 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"serving-certs-ca-bundle"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.031552 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.032947 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-sidecar-tls"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.036806 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-k8s-rulefiles-0"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.038200 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-trusted-ca-bundle"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.043583 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"]
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.055891 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7dc5585756-89sps" event={"ID":"d3604568-9c08-4efc-a18d-72bdfb6f4763","Type":"ContainerStarted","Data":"a3871c8a18213877103992909b597d13c6dc2ecc065b02d6004eb73db86e94ba"}
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.056068 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7dc5585756-89sps" event={"ID":"d3604568-9c08-4efc-a18d-72bdfb6f4763","Type":"ContainerStarted","Data":"05e0c7c884f85ac002f8554bba843b483e886604183ef6e9659d31703ee275ee"}
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.058096 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" event={"ID":"0dbf04dc-19a0-4694-bdc1-c5d81052efd3","Type":"ContainerStarted","Data":"ec6e8665bb06a2cb75b1e9b2e6c73f0e05b2c1c4072a91db1f59de0a5f279f19"}
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.058178 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" event={"ID":"0dbf04dc-19a0-4694-bdc1-c5d81052efd3","Type":"ContainerStarted","Data":"73fb6d84435d7d7e0ca6133960734654beba2809abe536b85e16d90ff159616b"}
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.058231 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" event={"ID":"0dbf04dc-19a0-4694-bdc1-c5d81052efd3","Type":"ContainerStarted","Data":"f97eeeb27f01d17d437e84b9ee04212efcc26f0d2be12d1e28fc99a9a2401e8d"}
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.109991 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-567ff9c44d-zvlcx"]
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.110659 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7dc5585756-89sps" podStartSLOduration=2.11064206 podStartE2EDuration="2.11064206s" podCreationTimestamp="2025-12-11 08:23:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:23:14.108648351 +0000 UTC m=+442.486017068" watchObservedRunningTime="2025-12-11 08:23:14.11064206 +0000 UTC m=+442.488010757"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.145156 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.145367 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/5c778a06-3dc5-439c-858c-69ec714eabb8-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.145474 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c778a06-3dc5-439c-858c-69ec714eabb8-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.145978 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.146025 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-web-config\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.146071 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.146088 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.146108 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.146156 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/5c778a06-3dc5-439c-858c-69ec714eabb8-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.146174 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5c778a06-3dc5-439c-858c-69ec714eabb8-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.146199 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5c778a06-3dc5-439c-858c-69ec714eabb8-config-out\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.146215 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c778a06-3dc5-439c-858c-69ec714eabb8-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.146241 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c778a06-3dc5-439c-858c-69ec714eabb8-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.146259 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume
\"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.146281 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.146402 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-config\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.146434 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhhbk\" (UniqueName: \"kubernetes.io/projected/5c778a06-3dc5-439c-858c-69ec714eabb8-kube-api-access-mhhbk\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.146520 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/5c778a06-3dc5-439c-858c-69ec714eabb8-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.179185 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-5cb9d5856f-c4cvw"] Dec 11 08:23:14 crc kubenswrapper[4881]: W1211 08:23:14.182853 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1904b37a_2b5b_425b_b3cb_cad8c86efcff.slice/crio-fee8dc1955033dbbe7c170e0ec8907851a7ddf840fb6f81df72d26ba36b63637 WatchSource:0}: Error finding container fee8dc1955033dbbe7c170e0ec8907851a7ddf840fb6f81df72d26ba36b63637: Status 404 returned error can't find the container with id fee8dc1955033dbbe7c170e0ec8907851a7ddf840fb6f81df72d26ba36b63637 Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248091 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/5c778a06-3dc5-439c-858c-69ec714eabb8-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248142 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c778a06-3dc5-439c-858c-69ec714eabb8-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248161 4881 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248179 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-web-config\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248206 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248222 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248238 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248257 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5c778a06-3dc5-439c-858c-69ec714eabb8-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248271 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/5c778a06-3dc5-439c-858c-69ec714eabb8-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248293 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5c778a06-3dc5-439c-858c-69ec714eabb8-config-out\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248310 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c778a06-3dc5-439c-858c-69ec714eabb8-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248356 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c778a06-3dc5-439c-858c-69ec714eabb8-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248381 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248406 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248444 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-config\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248464 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhhbk\" (UniqueName: \"kubernetes.io/projected/5c778a06-3dc5-439c-858c-69ec714eabb8-kube-api-access-mhhbk\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248484 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/5c778a06-3dc5-439c-858c-69ec714eabb8-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.248505 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.249001 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/5c778a06-3dc5-439c-858c-69ec714eabb8-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.251007 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c778a06-3dc5-439c-858c-69ec714eabb8-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.251669 4881 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/5c778a06-3dc5-439c-858c-69ec714eabb8-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.253391 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5c778a06-3dc5-439c-858c-69ec714eabb8-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.253618 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.254166 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.254177 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c778a06-3dc5-439c-858c-69ec714eabb8-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.254242 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.254755 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c778a06-3dc5-439c-858c-69ec714eabb8-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.255653 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/5c778a06-3dc5-439c-858c-69ec714eabb8-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.256706 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.258975 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: 
\"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.260192 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-config\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.260298 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.261191 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-web-config\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.261609 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5c778a06-3dc5-439c-858c-69ec714eabb8-config-out\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.261919 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/5c778a06-3dc5-439c-858c-69ec714eabb8-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.267421 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhhbk\" (UniqueName: \"kubernetes.io/projected/5c778a06-3dc5-439c-858c-69ec714eabb8-kube-api-access-mhhbk\") pod \"prometheus-k8s-0\" (UID: \"5c778a06-3dc5-439c-858c-69ec714eabb8\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.352745 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:14 crc kubenswrapper[4881]: I1211 08:23:14.816050 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Dec 11 08:23:15 crc kubenswrapper[4881]: I1211 08:23:15.065290 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx" event={"ID":"e33cf336-f1a0-4217-a606-c12ebf877533","Type":"ContainerStarted","Data":"a13e46de04b0281b31698fec401b92d501f99d001f407203b1c1e26484b45f34"} Dec 11 08:23:15 crc kubenswrapper[4881]: I1211 08:23:15.067444 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-5cb9d5856f-c4cvw" event={"ID":"1904b37a-2b5b-425b-b3cb-cad8c86efcff","Type":"ContainerStarted","Data":"fee8dc1955033dbbe7c170e0ec8907851a7ddf840fb6f81df72d26ba36b63637"} Dec 11 08:23:16 crc kubenswrapper[4881]: I1211 08:23:16.075969 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"f2498125-8f34-4a97-92de-92085c448bf4","Type":"ContainerStarted","Data":"924f504be50ac4b01d1c8b6e24446ef308b847d85ec4da340056f9101244c25c"} Dec 11 08:23:16 crc kubenswrapper[4881]: I1211 08:23:16.076721 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"f2498125-8f34-4a97-92de-92085c448bf4","Type":"ContainerStarted","Data":"1201f7f7f9cdc6c6191f1406765e4c77cb2a939097b99440bff1bada452d9855"} Dec 11 08:23:16 crc kubenswrapper[4881]: I1211 08:23:16.078046 4881 generic.go:334] "Generic (PLEG): container finished" podID="5c778a06-3dc5-439c-858c-69ec714eabb8" containerID="37fe396e1dbcb46c9a991fc183af4fbef88956012775e1f2a1cf7a966cfcf8bf" exitCode=0 Dec 11 08:23:16 crc kubenswrapper[4881]: I1211 08:23:16.078105 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"5c778a06-3dc5-439c-858c-69ec714eabb8","Type":"ContainerDied","Data":"37fe396e1dbcb46c9a991fc183af4fbef88956012775e1f2a1cf7a966cfcf8bf"} Dec 11 08:23:16 crc kubenswrapper[4881]: I1211 08:23:16.078137 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"5c778a06-3dc5-439c-858c-69ec714eabb8","Type":"ContainerStarted","Data":"9eaf030bbc5d1bbdc5c3e099d68446f5ca850cc8f464bac155e9eb7e8c1c2cd2"} Dec 11 08:23:17 crc kubenswrapper[4881]: I1211 08:23:17.083957 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx" event={"ID":"e33cf336-f1a0-4217-a606-c12ebf877533","Type":"ContainerStarted","Data":"e428b49914a49b45338f6b95e74ff42c8e1900bf83fa00a9584a0f7059aab053"} Dec 11 08:23:17 crc kubenswrapper[4881]: I1211 08:23:17.087429 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" event={"ID":"0dbf04dc-19a0-4694-bdc1-c5d81052efd3","Type":"ContainerStarted","Data":"ab2a683f91dc6d0dfde6797dd3bc3897ed77bf3144b924a66d7d32d437f330e5"} Dec 11 08:23:17 crc kubenswrapper[4881]: I1211 08:23:17.087806 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" event={"ID":"0dbf04dc-19a0-4694-bdc1-c5d81052efd3","Type":"ContainerStarted","Data":"c97f06b355d7f91b3ed03c034154e223ac5cd78232d449cb67e1749736cb40ea"} Dec 11 08:23:17 crc kubenswrapper[4881]: I1211 08:23:17.088573 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" event={"ID":"0dbf04dc-19a0-4694-bdc1-c5d81052efd3","Type":"ContainerStarted","Data":"6d63a3c41abdb12923d21ee06301021ce7b98484bcd38696cd68c5b9e2e0d578"} Dec 11 08:23:17 crc kubenswrapper[4881]: I1211 08:23:17.088759 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:17 crc kubenswrapper[4881]: I1211 08:23:17.092548 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"f2498125-8f34-4a97-92de-92085c448bf4","Type":"ContainerStarted","Data":"b22b376fadd573ccedff48599e820b6ffe052bec39f17847753636ef5d0b0d74"} Dec 11 08:23:17 crc kubenswrapper[4881]: I1211 08:23:17.092617 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"f2498125-8f34-4a97-92de-92085c448bf4","Type":"ContainerStarted","Data":"70a207e96d3874650d73758e860797ce43b0651f792c802c8fe17051d216a29a"} Dec 11 08:23:17 crc kubenswrapper[4881]: I1211 08:23:17.092631 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"f2498125-8f34-4a97-92de-92085c448bf4","Type":"ContainerStarted","Data":"251c3f2f090f22541d89f770434b7848782304f75add2194801912ffa5c07afb"} Dec 11 08:23:17 crc kubenswrapper[4881]: I1211 08:23:17.094398 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-5cb9d5856f-c4cvw" event={"ID":"1904b37a-2b5b-425b-b3cb-cad8c86efcff","Type":"ContainerStarted","Data":"e65efdbd153d033109f233c985e7d6a86da32e86ad896d9f3cc63d4a0f204a50"} Dec 11 08:23:17 crc kubenswrapper[4881]: I1211 08:23:17.095057 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/monitoring-plugin-5cb9d5856f-c4cvw" Dec 11 08:23:17 crc kubenswrapper[4881]: I1211 08:23:17.106277 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx" podStartSLOduration=2.62206782 podStartE2EDuration="5.10625818s" podCreationTimestamp="2025-12-11 08:23:12 +0000 UTC" firstStartedPulling="2025-12-11 08:23:14.119637358 +0000 UTC m=+442.497006055" lastFinishedPulling="2025-12-11 08:23:16.603827718 +0000 UTC m=+444.981196415" observedRunningTime="2025-12-11 08:23:17.104045086 +0000 UTC m=+445.481413793" watchObservedRunningTime="2025-12-11 08:23:17.10625818 +0000 UTC m=+445.483626877" Dec 11 08:23:17 crc kubenswrapper[4881]: I1211 08:23:17.108688 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/monitoring-plugin-5cb9d5856f-c4cvw" Dec 11 08:23:17 crc kubenswrapper[4881]: I1211 08:23:17.124727 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/monitoring-plugin-5cb9d5856f-c4cvw" podStartSLOduration=1.707599351 podStartE2EDuration="4.124709949s" podCreationTimestamp="2025-12-11 08:23:13 +0000 UTC" firstStartedPulling="2025-12-11 08:23:14.185554441 +0000 UTC m=+442.562923138" lastFinishedPulling="2025-12-11 08:23:16.602665039 +0000 UTC m=+444.980033736" observedRunningTime="2025-12-11 08:23:17.117954974 +0000 UTC m=+445.495323691" watchObservedRunningTime="2025-12-11 08:23:17.124709949 +0000 UTC m=+445.502078646" Dec 11 08:23:17 crc kubenswrapper[4881]: I1211 08:23:17.164465 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" 
podStartSLOduration=2.492775828 podStartE2EDuration="8.164447046s" podCreationTimestamp="2025-12-11 08:23:09 +0000 UTC" firstStartedPulling="2025-12-11 08:23:10.932209481 +0000 UTC m=+439.309578178" lastFinishedPulling="2025-12-11 08:23:16.603880709 +0000 UTC m=+444.981249396" observedRunningTime="2025-12-11 08:23:17.161006632 +0000 UTC m=+445.538375329" watchObservedRunningTime="2025-12-11 08:23:17.164447046 +0000 UTC m=+445.541815753" Dec 11 08:23:18 crc kubenswrapper[4881]: I1211 08:23:18.111902 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"f2498125-8f34-4a97-92de-92085c448bf4","Type":"ContainerStarted","Data":"794c9e5a51a4c76ceee2ae84b36bbc35c8ac36f772b37e766167c73e5a8294fc"} Dec 11 08:23:18 crc kubenswrapper[4881]: I1211 08:23:18.126787 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/thanos-querier-778bc6ff98-47qfw" Dec 11 08:23:18 crc kubenswrapper[4881]: I1211 08:23:18.146381 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/alertmanager-main-0" podStartSLOduration=4.597874571 podStartE2EDuration="10.146358711s" podCreationTimestamp="2025-12-11 08:23:08 +0000 UTC" firstStartedPulling="2025-12-11 08:23:09.682133883 +0000 UTC m=+438.059502580" lastFinishedPulling="2025-12-11 08:23:15.230618023 +0000 UTC m=+443.607986720" observedRunningTime="2025-12-11 08:23:18.143971683 +0000 UTC m=+446.521340390" watchObservedRunningTime="2025-12-11 08:23:18.146358711 +0000 UTC m=+446.523727418" Dec 11 08:23:21 crc kubenswrapper[4881]: I1211 08:23:21.133254 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"5c778a06-3dc5-439c-858c-69ec714eabb8","Type":"ContainerStarted","Data":"3f6b92c54f693dcd98e8a27040f922edf22874d5bed80494e335f2be317e9de1"} Dec 11 08:23:21 crc kubenswrapper[4881]: I1211 08:23:21.134305 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"5c778a06-3dc5-439c-858c-69ec714eabb8","Type":"ContainerStarted","Data":"1aa2bf79e75ae2052c36c6298cc073684968a487fe0464f35c4f488b3f9e1b8d"} Dec 11 08:23:21 crc kubenswrapper[4881]: I1211 08:23:21.134407 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"5c778a06-3dc5-439c-858c-69ec714eabb8","Type":"ContainerStarted","Data":"c345525dfa12f670f754e3b0f97ea4977c7ede8c2bd58bb1cf5fc46c538dcaf1"} Dec 11 08:23:22 crc kubenswrapper[4881]: I1211 08:23:22.144502 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"5c778a06-3dc5-439c-858c-69ec714eabb8","Type":"ContainerStarted","Data":"6595711df70fb6ec8dbc5df747ad306ccd9741a14b9c08d378d5b7f53ef060e3"} Dec 11 08:23:22 crc kubenswrapper[4881]: I1211 08:23:22.145016 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"5c778a06-3dc5-439c-858c-69ec714eabb8","Type":"ContainerStarted","Data":"7bbed9124971f401c81e8aeebae7938f183819fa7fd6d2177181c5dda50d9fff"} Dec 11 08:23:22 crc kubenswrapper[4881]: I1211 08:23:22.145054 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"5c778a06-3dc5-439c-858c-69ec714eabb8","Type":"ContainerStarted","Data":"aeb05a83f2d7eb885e33a6652ab76fd8ef0a328e286d8320ab18c73156c899f8"} Dec 11 08:23:22 crc kubenswrapper[4881]: I1211 08:23:22.188047 4881 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-k8s-0" podStartSLOduration=3.613398887 podStartE2EDuration="8.188030218s" podCreationTimestamp="2025-12-11 08:23:14 +0000 UTC" firstStartedPulling="2025-12-11 08:23:16.07977042 +0000 UTC m=+444.457139117" lastFinishedPulling="2025-12-11 08:23:20.654401751 +0000 UTC m=+449.031770448" observedRunningTime="2025-12-11 08:23:22.186012679 +0000 UTC m=+450.563381386" watchObservedRunningTime="2025-12-11 08:23:22.188030218 +0000 UTC m=+450.565398915" Dec 11 08:23:22 crc kubenswrapper[4881]: I1211 08:23:22.768327 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7dc5585756-89sps" Dec 11 08:23:22 crc kubenswrapper[4881]: I1211 08:23:22.768448 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7dc5585756-89sps" Dec 11 08:23:22 crc kubenswrapper[4881]: I1211 08:23:22.774707 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7dc5585756-89sps" Dec 11 08:23:23 crc kubenswrapper[4881]: I1211 08:23:23.154170 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7dc5585756-89sps" Dec 11 08:23:23 crc kubenswrapper[4881]: I1211 08:23:23.202251 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-d6tb5"] Dec 11 08:23:24 crc kubenswrapper[4881]: I1211 08:23:24.353480 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:23:33 crc kubenswrapper[4881]: I1211 08:23:33.254007 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx" Dec 11 08:23:33 crc kubenswrapper[4881]: I1211 08:23:33.258527 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx" Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.248494 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-d6tb5" podUID="09b82983-c4d5-4c1f-8f41-9dcc20fbfd03" containerName="console" containerID="cri-o://b279299bf01bc7bdacce1f90a320087c815d34f26dc2b7d78e28cb016043fc81" gracePeriod=15 Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.665227 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-d6tb5_09b82983-c4d5-4c1f-8f41-9dcc20fbfd03/console/0.log" Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.665649 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.864774 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-trusted-ca-bundle\") pod \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.864844 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-oauth-serving-cert\") pod \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.864900 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-serving-cert\") pod \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.865065 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-service-ca\") pod \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.865142 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-config\") pod \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.865195 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-oauth-config\") pod \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.865247 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnw6c\" (UniqueName: \"kubernetes.io/projected/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-kube-api-access-hnw6c\") pod \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\" (UID: \"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03\") " Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.866469 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09b82983-c4d5-4c1f-8f41-9dcc20fbfd03" (UID: "09b82983-c4d5-4c1f-8f41-9dcc20fbfd03"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.867199 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-service-ca" (OuterVolumeSpecName: "service-ca") pod "09b82983-c4d5-4c1f-8f41-9dcc20fbfd03" (UID: "09b82983-c4d5-4c1f-8f41-9dcc20fbfd03"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.868549 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-config" (OuterVolumeSpecName: "console-config") pod "09b82983-c4d5-4c1f-8f41-9dcc20fbfd03" (UID: "09b82983-c4d5-4c1f-8f41-9dcc20fbfd03"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.869395 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "09b82983-c4d5-4c1f-8f41-9dcc20fbfd03" (UID: "09b82983-c4d5-4c1f-8f41-9dcc20fbfd03"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.876056 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "09b82983-c4d5-4c1f-8f41-9dcc20fbfd03" (UID: "09b82983-c4d5-4c1f-8f41-9dcc20fbfd03"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.876504 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "09b82983-c4d5-4c1f-8f41-9dcc20fbfd03" (UID: "09b82983-c4d5-4c1f-8f41-9dcc20fbfd03"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.876960 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-kube-api-access-hnw6c" (OuterVolumeSpecName: "kube-api-access-hnw6c") pod "09b82983-c4d5-4c1f-8f41-9dcc20fbfd03" (UID: "09b82983-c4d5-4c1f-8f41-9dcc20fbfd03"). InnerVolumeSpecName "kube-api-access-hnw6c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.966731 4881 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-service-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.966778 4881 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.966792 4881 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.966804 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnw6c\" (UniqueName: \"kubernetes.io/projected/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-kube-api-access-hnw6c\") on node \"crc\" DevicePath \"\"" Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.966817 4881 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.966829 4881 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:23:48 crc kubenswrapper[4881]: I1211 08:23:48.966841 4881 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:23:49 crc kubenswrapper[4881]: I1211 08:23:49.332576 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-d6tb5_09b82983-c4d5-4c1f-8f41-9dcc20fbfd03/console/0.log" Dec 11 08:23:49 crc kubenswrapper[4881]: I1211 08:23:49.333070 4881 generic.go:334] "Generic (PLEG): container finished" podID="09b82983-c4d5-4c1f-8f41-9dcc20fbfd03" containerID="b279299bf01bc7bdacce1f90a320087c815d34f26dc2b7d78e28cb016043fc81" exitCode=2 Dec 11 08:23:49 crc kubenswrapper[4881]: I1211 08:23:49.333151 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-d6tb5" event={"ID":"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03","Type":"ContainerDied","Data":"b279299bf01bc7bdacce1f90a320087c815d34f26dc2b7d78e28cb016043fc81"} Dec 11 08:23:49 crc kubenswrapper[4881]: I1211 08:23:49.333162 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-d6tb5" Dec 11 08:23:49 crc kubenswrapper[4881]: I1211 08:23:49.333202 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-d6tb5" event={"ID":"09b82983-c4d5-4c1f-8f41-9dcc20fbfd03","Type":"ContainerDied","Data":"c104383ea2dc8dfe0e6f6537c3b72d2425698a783fb63d849e02c93d133303ca"} Dec 11 08:23:49 crc kubenswrapper[4881]: I1211 08:23:49.333242 4881 scope.go:117] "RemoveContainer" containerID="b279299bf01bc7bdacce1f90a320087c815d34f26dc2b7d78e28cb016043fc81" Dec 11 08:23:49 crc kubenswrapper[4881]: I1211 08:23:49.368183 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-d6tb5"] Dec 11 08:23:49 crc kubenswrapper[4881]: I1211 08:23:49.377475 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-d6tb5"] Dec 11 08:23:49 crc kubenswrapper[4881]: I1211 08:23:49.378558 4881 scope.go:117] "RemoveContainer" containerID="b279299bf01bc7bdacce1f90a320087c815d34f26dc2b7d78e28cb016043fc81" Dec 11 08:23:49 crc kubenswrapper[4881]: E1211 08:23:49.379427 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b279299bf01bc7bdacce1f90a320087c815d34f26dc2b7d78e28cb016043fc81\": container with ID starting with b279299bf01bc7bdacce1f90a320087c815d34f26dc2b7d78e28cb016043fc81 not found: ID does not exist" containerID="b279299bf01bc7bdacce1f90a320087c815d34f26dc2b7d78e28cb016043fc81" Dec 11 08:23:49 crc kubenswrapper[4881]: I1211 08:23:49.379489 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b279299bf01bc7bdacce1f90a320087c815d34f26dc2b7d78e28cb016043fc81"} err="failed to get container status \"b279299bf01bc7bdacce1f90a320087c815d34f26dc2b7d78e28cb016043fc81\": rpc error: code = NotFound desc = could not find container \"b279299bf01bc7bdacce1f90a320087c815d34f26dc2b7d78e28cb016043fc81\": container with ID starting with b279299bf01bc7bdacce1f90a320087c815d34f26dc2b7d78e28cb016043fc81 not found: ID does not exist" Dec 11 08:23:49 crc kubenswrapper[4881]: I1211 08:23:49.479061 4881 patch_prober.go:28] interesting pod/console-f9d7485db-d6tb5 container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/health\": context deadline exceeded" start-of-body= Dec 11 08:23:49 crc kubenswrapper[4881]: I1211 08:23:49.479182 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-f9d7485db-d6tb5" podUID="09b82983-c4d5-4c1f-8f41-9dcc20fbfd03" containerName="console" probeResult="failure" output="Get \"https://10.217.0.12:8443/health\": context deadline exceeded" Dec 11 08:23:51 crc kubenswrapper[4881]: I1211 08:23:51.018682 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09b82983-c4d5-4c1f-8f41-9dcc20fbfd03" path="/var/lib/kubelet/pods/09b82983-c4d5-4c1f-8f41-9dcc20fbfd03/volumes" Dec 11 08:23:53 crc kubenswrapper[4881]: I1211 08:23:53.263644 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx" Dec 11 08:23:53 crc kubenswrapper[4881]: I1211 08:23:53.272001 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx" Dec 11 08:24:14 crc kubenswrapper[4881]: I1211 08:24:14.353884 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:24:14 crc kubenswrapper[4881]: I1211 08:24:14.391669 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:24:14 crc kubenswrapper[4881]: I1211 08:24:14.551764 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-k8s-0" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.052807 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-68c7b5cd9d-j9fr6"] Dec 11 08:24:35 crc kubenswrapper[4881]: E1211 08:24:35.055168 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09b82983-c4d5-4c1f-8f41-9dcc20fbfd03" containerName="console" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.055413 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="09b82983-c4d5-4c1f-8f41-9dcc20fbfd03" containerName="console" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.055711 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="09b82983-c4d5-4c1f-8f41-9dcc20fbfd03" containerName="console" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.056585 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.064072 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-68c7b5cd9d-j9fr6"] Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.113709 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4d08bf9c-c409-4f74-afec-d566a07b6207-console-oauth-config\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.113751 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-service-ca\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.113769 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-md956\" (UniqueName: \"kubernetes.io/projected/4d08bf9c-c409-4f74-afec-d566a07b6207-kube-api-access-md956\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.113786 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-console-config\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.113977 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4d08bf9c-c409-4f74-afec-d566a07b6207-console-serving-cert\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " 
pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.114086 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-trusted-ca-bundle\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.114123 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-oauth-serving-cert\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.215062 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4d08bf9c-c409-4f74-afec-d566a07b6207-console-oauth-config\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.215110 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-service-ca\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.215130 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-md956\" (UniqueName: \"kubernetes.io/projected/4d08bf9c-c409-4f74-afec-d566a07b6207-kube-api-access-md956\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.215148 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-console-config\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.215223 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4d08bf9c-c409-4f74-afec-d566a07b6207-console-serving-cert\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.215281 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-trusted-ca-bundle\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.215317 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-oauth-serving-cert\") pod \"console-68c7b5cd9d-j9fr6\" (UID: 
\"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.216569 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-service-ca\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.216664 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-oauth-serving-cert\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.216694 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-console-config\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.217101 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-trusted-ca-bundle\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.219961 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4d08bf9c-c409-4f74-afec-d566a07b6207-console-oauth-config\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.222715 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4d08bf9c-c409-4f74-afec-d566a07b6207-console-serving-cert\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.237922 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-md956\" (UniqueName: \"kubernetes.io/projected/4d08bf9c-c409-4f74-afec-d566a07b6207-kube-api-access-md956\") pod \"console-68c7b5cd9d-j9fr6\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") " pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.376574 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.594522 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-68c7b5cd9d-j9fr6"] Dec 11 08:24:35 crc kubenswrapper[4881]: I1211 08:24:35.665679 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-68c7b5cd9d-j9fr6" event={"ID":"4d08bf9c-c409-4f74-afec-d566a07b6207","Type":"ContainerStarted","Data":"87785d399aee96832fd21e716fe9a18beab4390b0a005fd6673b87ed75fccd58"} Dec 11 08:24:36 crc kubenswrapper[4881]: I1211 08:24:36.676095 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-68c7b5cd9d-j9fr6" event={"ID":"4d08bf9c-c409-4f74-afec-d566a07b6207","Type":"ContainerStarted","Data":"e2c2ceae104d1983210b87b381f0457fc246b06bc8f3ca15bc6d4a0694433d72"} Dec 11 08:24:36 crc kubenswrapper[4881]: I1211 08:24:36.715616 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-68c7b5cd9d-j9fr6" podStartSLOduration=1.715582793 podStartE2EDuration="1.715582793s" podCreationTimestamp="2025-12-11 08:24:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:24:36.702095862 +0000 UTC m=+525.079464579" watchObservedRunningTime="2025-12-11 08:24:36.715582793 +0000 UTC m=+525.092951530" Dec 11 08:24:45 crc kubenswrapper[4881]: I1211 08:24:45.377123 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:45 crc kubenswrapper[4881]: I1211 08:24:45.377708 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:45 crc kubenswrapper[4881]: I1211 08:24:45.382028 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:45 crc kubenswrapper[4881]: I1211 08:24:45.740612 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:24:45 crc kubenswrapper[4881]: I1211 08:24:45.804000 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-7dc5585756-89sps"] Dec 11 08:24:53 crc kubenswrapper[4881]: I1211 08:24:53.215191 4881 scope.go:117] "RemoveContainer" containerID="2529e00c18dd5a4e1e02de975a5654ba6325ce7db6af218f77c0fb1a3f457634" Dec 11 08:25:10 crc kubenswrapper[4881]: I1211 08:25:10.850851 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-7dc5585756-89sps" podUID="d3604568-9c08-4efc-a18d-72bdfb6f4763" containerName="console" containerID="cri-o://a3871c8a18213877103992909b597d13c6dc2ecc065b02d6004eb73db86e94ba" gracePeriod=15 Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.213862 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-7dc5585756-89sps_d3604568-9c08-4efc-a18d-72bdfb6f4763/console/0.log" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.214161 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-7dc5585756-89sps" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.271284 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-service-ca\") pod \"d3604568-9c08-4efc-a18d-72bdfb6f4763\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.271369 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-oauth-config\") pod \"d3604568-9c08-4efc-a18d-72bdfb6f4763\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.271405 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-config\") pod \"d3604568-9c08-4efc-a18d-72bdfb6f4763\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.271439 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-oauth-serving-cert\") pod \"d3604568-9c08-4efc-a18d-72bdfb6f4763\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.271469 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-trusted-ca-bundle\") pod \"d3604568-9c08-4efc-a18d-72bdfb6f4763\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.271514 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5gpq\" (UniqueName: \"kubernetes.io/projected/d3604568-9c08-4efc-a18d-72bdfb6f4763-kube-api-access-q5gpq\") pod \"d3604568-9c08-4efc-a18d-72bdfb6f4763\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.271544 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-serving-cert\") pod \"d3604568-9c08-4efc-a18d-72bdfb6f4763\" (UID: \"d3604568-9c08-4efc-a18d-72bdfb6f4763\") " Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.272449 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "d3604568-9c08-4efc-a18d-72bdfb6f4763" (UID: "d3604568-9c08-4efc-a18d-72bdfb6f4763"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.272455 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "d3604568-9c08-4efc-a18d-72bdfb6f4763" (UID: "d3604568-9c08-4efc-a18d-72bdfb6f4763"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.272522 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-service-ca" (OuterVolumeSpecName: "service-ca") pod "d3604568-9c08-4efc-a18d-72bdfb6f4763" (UID: "d3604568-9c08-4efc-a18d-72bdfb6f4763"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.273119 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-config" (OuterVolumeSpecName: "console-config") pod "d3604568-9c08-4efc-a18d-72bdfb6f4763" (UID: "d3604568-9c08-4efc-a18d-72bdfb6f4763"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.273281 4881 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.273312 4881 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.273410 4881 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.273435 4881 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d3604568-9c08-4efc-a18d-72bdfb6f4763-service-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.279586 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "d3604568-9c08-4efc-a18d-72bdfb6f4763" (UID: "d3604568-9c08-4efc-a18d-72bdfb6f4763"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.279668 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "d3604568-9c08-4efc-a18d-72bdfb6f4763" (UID: "d3604568-9c08-4efc-a18d-72bdfb6f4763"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.280146 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3604568-9c08-4efc-a18d-72bdfb6f4763-kube-api-access-q5gpq" (OuterVolumeSpecName: "kube-api-access-q5gpq") pod "d3604568-9c08-4efc-a18d-72bdfb6f4763" (UID: "d3604568-9c08-4efc-a18d-72bdfb6f4763"). InnerVolumeSpecName "kube-api-access-q5gpq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.374688 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5gpq\" (UniqueName: \"kubernetes.io/projected/d3604568-9c08-4efc-a18d-72bdfb6f4763-kube-api-access-q5gpq\") on node \"crc\" DevicePath \"\"" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.374726 4881 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.374735 4881 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d3604568-9c08-4efc-a18d-72bdfb6f4763-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.949536 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-7dc5585756-89sps_d3604568-9c08-4efc-a18d-72bdfb6f4763/console/0.log" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.949620 4881 generic.go:334] "Generic (PLEG): container finished" podID="d3604568-9c08-4efc-a18d-72bdfb6f4763" containerID="a3871c8a18213877103992909b597d13c6dc2ecc065b02d6004eb73db86e94ba" exitCode=2 Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.949703 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7dc5585756-89sps" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.949723 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7dc5585756-89sps" event={"ID":"d3604568-9c08-4efc-a18d-72bdfb6f4763","Type":"ContainerDied","Data":"a3871c8a18213877103992909b597d13c6dc2ecc065b02d6004eb73db86e94ba"} Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.949789 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7dc5585756-89sps" event={"ID":"d3604568-9c08-4efc-a18d-72bdfb6f4763","Type":"ContainerDied","Data":"05e0c7c884f85ac002f8554bba843b483e886604183ef6e9659d31703ee275ee"} Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.949826 4881 scope.go:117] "RemoveContainer" containerID="a3871c8a18213877103992909b597d13c6dc2ecc065b02d6004eb73db86e94ba" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.970240 4881 scope.go:117] "RemoveContainer" containerID="a3871c8a18213877103992909b597d13c6dc2ecc065b02d6004eb73db86e94ba" Dec 11 08:25:11 crc kubenswrapper[4881]: E1211 08:25:11.970657 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3871c8a18213877103992909b597d13c6dc2ecc065b02d6004eb73db86e94ba\": container with ID starting with a3871c8a18213877103992909b597d13c6dc2ecc065b02d6004eb73db86e94ba not found: ID does not exist" containerID="a3871c8a18213877103992909b597d13c6dc2ecc065b02d6004eb73db86e94ba" Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.970683 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3871c8a18213877103992909b597d13c6dc2ecc065b02d6004eb73db86e94ba"} err="failed to get container status \"a3871c8a18213877103992909b597d13c6dc2ecc065b02d6004eb73db86e94ba\": rpc error: code = NotFound desc = could not find container \"a3871c8a18213877103992909b597d13c6dc2ecc065b02d6004eb73db86e94ba\": container with ID starting with 
a3871c8a18213877103992909b597d13c6dc2ecc065b02d6004eb73db86e94ba not found: ID does not exist"
Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.987146 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-7dc5585756-89sps"]
Dec 11 08:25:11 crc kubenswrapper[4881]: I1211 08:25:11.994880 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-7dc5585756-89sps"]
Dec 11 08:25:13 crc kubenswrapper[4881]: I1211 08:25:13.021295 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3604568-9c08-4efc-a18d-72bdfb6f4763" path="/var/lib/kubelet/pods/d3604568-9c08-4efc-a18d-72bdfb6f4763/volumes"
Dec 11 08:25:29 crc kubenswrapper[4881]: I1211 08:25:29.397010 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 08:25:29 crc kubenswrapper[4881]: I1211 08:25:29.397578 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 08:25:57 crc kubenswrapper[4881]: I1211 08:25:57.710838 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk"]
Dec 11 08:25:57 crc kubenswrapper[4881]: E1211 08:25:57.711563 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3604568-9c08-4efc-a18d-72bdfb6f4763" containerName="console"
Dec 11 08:25:57 crc kubenswrapper[4881]: I1211 08:25:57.711580 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3604568-9c08-4efc-a18d-72bdfb6f4763" containerName="console"
Dec 11 08:25:57 crc kubenswrapper[4881]: I1211 08:25:57.711711 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3604568-9c08-4efc-a18d-72bdfb6f4763" containerName="console"
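Note the exit codes in this teardown: the old console container reported exitCode=2 (an application-level exit), while the SIGTERM-killed OVN containers later in this log report exitCode=143. A small helper for reading such codes, assuming the common 128+signal-number convention that shells and container runtimes such as CRI-O use for signal deaths (a sketch, not kubelet code):

import signal

def describe_exit_code(code: int) -> str:
    """Interpret a container exit code under the 128+N signal convention."""
    if code == 0:
        return "clean exit"
    if code > 128:
        try:
            return f"killed by {signal.Signals(code - 128).name}"
        except ValueError:
            return f"killed by unknown signal {code - 128}"
    return f"application exit status {code}"

# describe_exit_code(143) -> "killed by SIGTERM" (a graceful kill, as with gracePeriod=15 above)
# describe_exit_code(2)   -> "application exit status 2" (the console container above)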
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" Dec 11 08:25:57 crc kubenswrapper[4881]: I1211 08:25:57.714430 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 11 08:25:57 crc kubenswrapper[4881]: I1211 08:25:57.719255 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk"] Dec 11 08:25:57 crc kubenswrapper[4881]: I1211 08:25:57.802190 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9prv\" (UniqueName: \"kubernetes.io/projected/61c61666-4cf4-40df-bc54-202ef93cf87d-kube-api-access-f9prv\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk\" (UID: \"61c61666-4cf4-40df-bc54-202ef93cf87d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" Dec 11 08:25:57 crc kubenswrapper[4881]: I1211 08:25:57.802287 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/61c61666-4cf4-40df-bc54-202ef93cf87d-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk\" (UID: \"61c61666-4cf4-40df-bc54-202ef93cf87d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" Dec 11 08:25:57 crc kubenswrapper[4881]: I1211 08:25:57.802376 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/61c61666-4cf4-40df-bc54-202ef93cf87d-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk\" (UID: \"61c61666-4cf4-40df-bc54-202ef93cf87d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" Dec 11 08:25:57 crc kubenswrapper[4881]: I1211 08:25:57.903987 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/61c61666-4cf4-40df-bc54-202ef93cf87d-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk\" (UID: \"61c61666-4cf4-40df-bc54-202ef93cf87d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" Dec 11 08:25:57 crc kubenswrapper[4881]: I1211 08:25:57.904068 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/61c61666-4cf4-40df-bc54-202ef93cf87d-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk\" (UID: \"61c61666-4cf4-40df-bc54-202ef93cf87d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" Dec 11 08:25:57 crc kubenswrapper[4881]: I1211 08:25:57.904139 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9prv\" (UniqueName: \"kubernetes.io/projected/61c61666-4cf4-40df-bc54-202ef93cf87d-kube-api-access-f9prv\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk\" (UID: \"61c61666-4cf4-40df-bc54-202ef93cf87d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" Dec 11 08:25:57 crc kubenswrapper[4881]: I1211 08:25:57.904660 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/61c61666-4cf4-40df-bc54-202ef93cf87d-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk\" (UID: \"61c61666-4cf4-40df-bc54-202ef93cf87d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" Dec 11 08:25:57 crc kubenswrapper[4881]: I1211 08:25:57.904693 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/61c61666-4cf4-40df-bc54-202ef93cf87d-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk\" (UID: \"61c61666-4cf4-40df-bc54-202ef93cf87d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" Dec 11 08:25:57 crc kubenswrapper[4881]: I1211 08:25:57.926445 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9prv\" (UniqueName: \"kubernetes.io/projected/61c61666-4cf4-40df-bc54-202ef93cf87d-kube-api-access-f9prv\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk\" (UID: \"61c61666-4cf4-40df-bc54-202ef93cf87d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" Dec 11 08:25:58 crc kubenswrapper[4881]: I1211 08:25:58.029063 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" Dec 11 08:25:58 crc kubenswrapper[4881]: I1211 08:25:58.271515 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk"] Dec 11 08:25:59 crc kubenswrapper[4881]: I1211 08:25:59.267647 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" event={"ID":"61c61666-4cf4-40df-bc54-202ef93cf87d","Type":"ContainerStarted","Data":"a75f8361a46cfe38af7d61fe4c017eadbbe1285442bbccd39560b403e4ad7fcf"} Dec 11 08:25:59 crc kubenswrapper[4881]: I1211 08:25:59.267977 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" event={"ID":"61c61666-4cf4-40df-bc54-202ef93cf87d","Type":"ContainerStarted","Data":"d4efc3a41ed25bdbdbdd91d5122e67331b3fd30a9fc658a21f6026178ded08f2"} Dec 11 08:25:59 crc kubenswrapper[4881]: I1211 08:25:59.397706 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:25:59 crc kubenswrapper[4881]: I1211 08:25:59.397784 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:26:00 crc kubenswrapper[4881]: I1211 08:26:00.274454 4881 generic.go:334] "Generic (PLEG): container finished" podID="61c61666-4cf4-40df-bc54-202ef93cf87d" containerID="a75f8361a46cfe38af7d61fe4c017eadbbe1285442bbccd39560b403e4ad7fcf" exitCode=0 Dec 11 08:26:00 crc kubenswrapper[4881]: I1211 08:26:00.274552 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" event={"ID":"61c61666-4cf4-40df-bc54-202ef93cf87d","Type":"ContainerDied","Data":"a75f8361a46cfe38af7d61fe4c017eadbbe1285442bbccd39560b403e4ad7fcf"} Dec 11 08:26:00 crc kubenswrapper[4881]: I1211 08:26:00.277608 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 08:26:03 crc kubenswrapper[4881]: I1211 08:26:03.294943 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" event={"ID":"61c61666-4cf4-40df-bc54-202ef93cf87d","Type":"ContainerStarted","Data":"5e4c74714ce4fa8f3ed01d33b7d47635497aa2bf4714366078df2c940d7a1223"} Dec 11 08:26:04 crc kubenswrapper[4881]: I1211 08:26:04.302962 4881 generic.go:334] "Generic (PLEG): container finished" podID="61c61666-4cf4-40df-bc54-202ef93cf87d" containerID="5e4c74714ce4fa8f3ed01d33b7d47635497aa2bf4714366078df2c940d7a1223" exitCode=0 Dec 11 08:26:04 crc kubenswrapper[4881]: I1211 08:26:04.303033 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" event={"ID":"61c61666-4cf4-40df-bc54-202ef93cf87d","Type":"ContainerDied","Data":"5e4c74714ce4fa8f3ed01d33b7d47635497aa2bf4714366078df2c940d7a1223"} Dec 11 08:26:06 crc kubenswrapper[4881]: I1211 08:26:06.319199 4881 generic.go:334] "Generic (PLEG): container finished" podID="61c61666-4cf4-40df-bc54-202ef93cf87d" containerID="1bd3f3747df56b851726ac7d9e09ea765ce512ee089abee6f1ac426b8b31a3be" exitCode=0 Dec 11 08:26:06 crc kubenswrapper[4881]: I1211 08:26:06.319283 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" event={"ID":"61c61666-4cf4-40df-bc54-202ef93cf87d","Type":"ContainerDied","Data":"1bd3f3747df56b851726ac7d9e09ea765ce512ee089abee6f1ac426b8b31a3be"} Dec 11 08:26:07 crc kubenswrapper[4881]: I1211 08:26:07.692525 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" Dec 11 08:26:07 crc kubenswrapper[4881]: I1211 08:26:07.767470 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/61c61666-4cf4-40df-bc54-202ef93cf87d-util\") pod \"61c61666-4cf4-40df-bc54-202ef93cf87d\" (UID: \"61c61666-4cf4-40df-bc54-202ef93cf87d\") " Dec 11 08:26:07 crc kubenswrapper[4881]: I1211 08:26:07.767887 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/61c61666-4cf4-40df-bc54-202ef93cf87d-bundle\") pod \"61c61666-4cf4-40df-bc54-202ef93cf87d\" (UID: \"61c61666-4cf4-40df-bc54-202ef93cf87d\") " Dec 11 08:26:07 crc kubenswrapper[4881]: I1211 08:26:07.767992 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9prv\" (UniqueName: \"kubernetes.io/projected/61c61666-4cf4-40df-bc54-202ef93cf87d-kube-api-access-f9prv\") pod \"61c61666-4cf4-40df-bc54-202ef93cf87d\" (UID: \"61c61666-4cf4-40df-bc54-202ef93cf87d\") " Dec 11 08:26:07 crc kubenswrapper[4881]: I1211 08:26:07.770236 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61c61666-4cf4-40df-bc54-202ef93cf87d-bundle" (OuterVolumeSpecName: "bundle") pod "61c61666-4cf4-40df-bc54-202ef93cf87d" (UID: "61c61666-4cf4-40df-bc54-202ef93cf87d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:26:07 crc kubenswrapper[4881]: I1211 08:26:07.778649 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61c61666-4cf4-40df-bc54-202ef93cf87d-kube-api-access-f9prv" (OuterVolumeSpecName: "kube-api-access-f9prv") pod "61c61666-4cf4-40df-bc54-202ef93cf87d" (UID: "61c61666-4cf4-40df-bc54-202ef93cf87d"). InnerVolumeSpecName "kube-api-access-f9prv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:26:07 crc kubenswrapper[4881]: I1211 08:26:07.785308 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61c61666-4cf4-40df-bc54-202ef93cf87d-util" (OuterVolumeSpecName: "util") pod "61c61666-4cf4-40df-bc54-202ef93cf87d" (UID: "61c61666-4cf4-40df-bc54-202ef93cf87d"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:26:07 crc kubenswrapper[4881]: I1211 08:26:07.870950 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f9prv\" (UniqueName: \"kubernetes.io/projected/61c61666-4cf4-40df-bc54-202ef93cf87d-kube-api-access-f9prv\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:07 crc kubenswrapper[4881]: I1211 08:26:07.871019 4881 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/61c61666-4cf4-40df-bc54-202ef93cf87d-util\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:07 crc kubenswrapper[4881]: I1211 08:26:07.871037 4881 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/61c61666-4cf4-40df-bc54-202ef93cf87d-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:08 crc kubenswrapper[4881]: I1211 08:26:08.332934 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" event={"ID":"61c61666-4cf4-40df-bc54-202ef93cf87d","Type":"ContainerDied","Data":"d4efc3a41ed25bdbdbdd91d5122e67331b3fd30a9fc658a21f6026178ded08f2"} Dec 11 08:26:08 crc kubenswrapper[4881]: I1211 08:26:08.332974 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d4efc3a41ed25bdbdbdd91d5122e67331b3fd30a9fc658a21f6026178ded08f2" Dec 11 08:26:08 crc kubenswrapper[4881]: I1211 08:26:08.333057 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk" Dec 11 08:26:08 crc kubenswrapper[4881]: I1211 08:26:08.955733 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-wf8q8"] Dec 11 08:26:08 crc kubenswrapper[4881]: I1211 08:26:08.956281 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovn-controller" containerID="cri-o://5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea" gracePeriod=30 Dec 11 08:26:08 crc kubenswrapper[4881]: I1211 08:26:08.956369 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="nbdb" containerID="cri-o://c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0" gracePeriod=30 Dec 11 08:26:08 crc kubenswrapper[4881]: I1211 08:26:08.956413 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b" gracePeriod=30 Dec 11 08:26:08 crc kubenswrapper[4881]: I1211 08:26:08.956500 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="northd" containerID="cri-o://6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6" gracePeriod=30 Dec 11 08:26:08 crc kubenswrapper[4881]: I1211 08:26:08.956540 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovn-acl-logging" 
containerID="cri-o://5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5" gracePeriod=30 Dec 11 08:26:08 crc kubenswrapper[4881]: I1211 08:26:08.956768 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="kube-rbac-proxy-node" containerID="cri-o://3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079" gracePeriod=30 Dec 11 08:26:08 crc kubenswrapper[4881]: I1211 08:26:08.956879 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="sbdb" containerID="cri-o://88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28" gracePeriod=30 Dec 11 08:26:08 crc kubenswrapper[4881]: I1211 08:26:08.988360 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovnkube-controller" containerID="cri-o://948f85b80bf82b9419418419a5b8d071a585911d9e4ac3cd9122fe83ee5836e9" gracePeriod=30 Dec 11 08:26:10 crc kubenswrapper[4881]: I1211 08:26:10.354988 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovnkube-controller/3.log" Dec 11 08:26:10 crc kubenswrapper[4881]: I1211 08:26:10.359416 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovn-acl-logging/0.log" Dec 11 08:26:10 crc kubenswrapper[4881]: I1211 08:26:10.360897 4881 generic.go:334] "Generic (PLEG): container finished" podID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerID="5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5" exitCode=143 Dec 11 08:26:10 crc kubenswrapper[4881]: I1211 08:26:10.360957 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerDied","Data":"5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5"} Dec 11 08:26:11 crc kubenswrapper[4881]: E1211 08:26:11.889016 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Dec 11 08:26:11 crc kubenswrapper[4881]: E1211 08:26:11.889031 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Dec 11 08:26:11 crc kubenswrapper[4881]: E1211 08:26:11.891693 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28" cmd=["/bin/bash","-c","set -xeo pipefail\n. 
/ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Dec 11 08:26:11 crc kubenswrapper[4881]: E1211 08:26:11.891876 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Dec 11 08:26:11 crc kubenswrapper[4881]: E1211 08:26:11.893888 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Dec 11 08:26:11 crc kubenswrapper[4881]: E1211 08:26:11.893971 4881 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="nbdb" Dec 11 08:26:11 crc kubenswrapper[4881]: E1211 08:26:11.893904 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Dec 11 08:26:11 crc kubenswrapper[4881]: E1211 08:26:11.894107 4881 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="sbdb" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.379736 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovnkube-controller/3.log" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.381928 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovn-acl-logging/0.log" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.382390 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovn-controller/0.log" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.382735 4881 generic.go:334] "Generic (PLEG): container finished" podID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerID="948f85b80bf82b9419418419a5b8d071a585911d9e4ac3cd9122fe83ee5836e9" exitCode=0 Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.382769 4881 generic.go:334] "Generic (PLEG): container finished" podID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerID="88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28" exitCode=0 Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.382779 4881 generic.go:334] "Generic (PLEG): container finished" 
podID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerID="c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0" exitCode=0 Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.382788 4881 generic.go:334] "Generic (PLEG): container finished" podID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerID="6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6" exitCode=0 Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.382800 4881 generic.go:334] "Generic (PLEG): container finished" podID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerID="3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b" exitCode=0 Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.382811 4881 generic.go:334] "Generic (PLEG): container finished" podID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerID="3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079" exitCode=0 Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.382820 4881 generic.go:334] "Generic (PLEG): container finished" podID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerID="5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea" exitCode=143 Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.382866 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerDied","Data":"948f85b80bf82b9419418419a5b8d071a585911d9e4ac3cd9122fe83ee5836e9"} Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.382897 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerDied","Data":"88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28"} Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.382912 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerDied","Data":"c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0"} Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.382925 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerDied","Data":"6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6"} Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.382939 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerDied","Data":"3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b"} Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.382953 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerDied","Data":"3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079"} Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.382965 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerDied","Data":"5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea"} Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.382984 4881 scope.go:117] "RemoveContainer" 
containerID="4613d67b59382894af01ae31e8f1a60355dd4362ea6499cdc10741d2abfd078b" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.385708 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g8jhd_368e635e-0e63-4202-b9e4-4a3a85c6f30c/kube-multus/2.log" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.386251 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g8jhd_368e635e-0e63-4202-b9e4-4a3a85c6f30c/kube-multus/1.log" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.386322 4881 generic.go:334] "Generic (PLEG): container finished" podID="368e635e-0e63-4202-b9e4-4a3a85c6f30c" containerID="021945edb3416828d6a387f2de7474bbde198cdef1eb1a9aea5de0cd3699a72a" exitCode=2 Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.386375 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-g8jhd" event={"ID":"368e635e-0e63-4202-b9e4-4a3a85c6f30c","Type":"ContainerDied","Data":"021945edb3416828d6a387f2de7474bbde198cdef1eb1a9aea5de0cd3699a72a"} Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.386893 4881 scope.go:117] "RemoveContainer" containerID="021945edb3416828d6a387f2de7474bbde198cdef1eb1a9aea5de0cd3699a72a" Dec 11 08:26:13 crc kubenswrapper[4881]: E1211 08:26:13.387123 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-g8jhd_openshift-multus(368e635e-0e63-4202-b9e4-4a3a85c6f30c)\"" pod="openshift-multus/multus-g8jhd" podUID="368e635e-0e63-4202-b9e4-4a3a85c6f30c" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.665833 4881 scope.go:117] "RemoveContainer" containerID="472f02e542c67bbd11145db9b59f2bae1dc688d45e95099b17a33fa1e27dbac8" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.852461 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovn-acl-logging/0.log" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.852906 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovn-controller/0.log" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.853278 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.897942 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-jn7gc"] Dec 11 08:26:13 crc kubenswrapper[4881]: E1211 08:26:13.898203 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovnkube-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898224 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovnkube-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: E1211 08:26:13.898239 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="sbdb" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898247 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="sbdb" Dec 11 08:26:13 crc kubenswrapper[4881]: E1211 08:26:13.898254 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovnkube-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898261 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovnkube-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: E1211 08:26:13.898273 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovnkube-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898281 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovnkube-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: E1211 08:26:13.898296 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovnkube-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898304 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovnkube-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: E1211 08:26:13.898314 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="kube-rbac-proxy-node" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898322 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="kube-rbac-proxy-node" Dec 11 08:26:13 crc kubenswrapper[4881]: E1211 08:26:13.898353 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovn-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898364 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovn-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: E1211 08:26:13.898375 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61c61666-4cf4-40df-bc54-202ef93cf87d" containerName="pull" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898383 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="61c61666-4cf4-40df-bc54-202ef93cf87d" containerName="pull" Dec 11 08:26:13 crc kubenswrapper[4881]: E1211 08:26:13.898392 4881 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="kubecfg-setup" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898399 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="kubecfg-setup" Dec 11 08:26:13 crc kubenswrapper[4881]: E1211 08:26:13.898411 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="nbdb" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898418 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="nbdb" Dec 11 08:26:13 crc kubenswrapper[4881]: E1211 08:26:13.898429 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="northd" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898435 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="northd" Dec 11 08:26:13 crc kubenswrapper[4881]: E1211 08:26:13.898446 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61c61666-4cf4-40df-bc54-202ef93cf87d" containerName="extract" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898452 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="61c61666-4cf4-40df-bc54-202ef93cf87d" containerName="extract" Dec 11 08:26:13 crc kubenswrapper[4881]: E1211 08:26:13.898464 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="kube-rbac-proxy-ovn-metrics" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898472 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="kube-rbac-proxy-ovn-metrics" Dec 11 08:26:13 crc kubenswrapper[4881]: E1211 08:26:13.898483 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61c61666-4cf4-40df-bc54-202ef93cf87d" containerName="util" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898489 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="61c61666-4cf4-40df-bc54-202ef93cf87d" containerName="util" Dec 11 08:26:13 crc kubenswrapper[4881]: E1211 08:26:13.898497 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovn-acl-logging" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898504 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovn-acl-logging" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898624 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovnkube-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898636 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="nbdb" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898648 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovnkube-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898659 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="kube-rbac-proxy-ovn-metrics" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898668 4881 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovnkube-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898677 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="kube-rbac-proxy-node" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898685 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="61c61666-4cf4-40df-bc54-202ef93cf87d" containerName="extract" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898693 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="northd" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898701 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovn-acl-logging" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898716 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="sbdb" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.898727 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovn-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: E1211 08:26:13.899015 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovnkube-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.899029 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovnkube-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.899166 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovnkube-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.899179 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" containerName="ovnkube-controller" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.901937 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.986540 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rjwc\" (UniqueName: \"kubernetes.io/projected/f14cc110-e74f-4cb7-a998-041e3f9b537b-kube-api-access-8rjwc\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.986588 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-cni-netd\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.986620 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovnkube-script-lib\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.986637 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-log-socket\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.986665 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-systemd-units\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.986677 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.986696 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-slash\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.986740 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-slash" (OuterVolumeSpecName: "host-slash") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.986804 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-cni-bin\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.986838 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-openvswitch\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.986865 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-run-netns\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.986887 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-run-ovn-kubernetes\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.986931 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-var-lib-openvswitch\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.986954 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovn-node-metrics-cert\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.986977 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-etc-openvswitch\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987001 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovnkube-config\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987023 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-ovn\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987054 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-systemd\") pod 
\"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987078 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987114 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-kubelet\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987120 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987151 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-node-log\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987175 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-env-overrides\") pod \"f14cc110-e74f-4cb7-a998-041e3f9b537b\" (UID: \"f14cc110-e74f-4cb7-a998-041e3f9b537b\") " Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987328 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-systemd-units\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987374 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-run-openvswitch\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987426 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-env-overrides\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987465 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-ovnkube-config\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987487 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-log-socket\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987532 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrlws\" (UniqueName: \"kubernetes.io/projected/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-kube-api-access-xrlws\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987563 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-etc-openvswitch\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987618 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-run-systemd\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987656 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-cni-netd\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987677 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-slash\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987718 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-kubelet\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987752 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987786 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: 
\"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-node-log\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987807 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-ovnkube-script-lib\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987838 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-run-ovn\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987865 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-run-netns\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987897 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-var-lib-openvswitch\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987931 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-ovn-node-metrics-cert\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987987 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-cni-bin\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.988014 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-run-ovn-kubernetes\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.988083 4881 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-cni-netd\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.988099 4881 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: 
\"kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.988109 4881 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-slash\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987152 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-log-socket" (OuterVolumeSpecName: "log-socket") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987165 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987183 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987195 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987208 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987223 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987235 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "host-run-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987248 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987375 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987708 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.987732 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.988145 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.988171 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-node-log" (OuterVolumeSpecName: "node-log") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.988528 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.992924 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). 
InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:26:13 crc kubenswrapper[4881]: I1211 08:26:13.998092 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f14cc110-e74f-4cb7-a998-041e3f9b537b-kube-api-access-8rjwc" (OuterVolumeSpecName: "kube-api-access-8rjwc") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "kube-api-access-8rjwc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.002724 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "f14cc110-e74f-4cb7-a998-041e3f9b537b" (UID: "f14cc110-e74f-4cb7-a998-041e3f9b537b"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089105 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-cni-bin\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089148 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-run-ovn-kubernetes\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089178 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-systemd-units\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089210 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-run-openvswitch\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089236 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-env-overrides\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089245 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-cni-bin\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089255 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-ovnkube-config\") 
pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089308 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-log-socket\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089356 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrlws\" (UniqueName: \"kubernetes.io/projected/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-kube-api-access-xrlws\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089383 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-etc-openvswitch\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089415 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-run-systemd\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089441 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-cni-netd\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089460 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-slash\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089488 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-kubelet\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089546 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-node-log\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089570 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-jn7gc\" (UID: 
\"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089594 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-ovnkube-script-lib\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089618 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-kubelet\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089651 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-run-ovn\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089680 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-run-ovn\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089689 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-run-netns\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089675 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-etc-openvswitch\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089737 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089713 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-node-log\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089726 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-var-lib-openvswitch\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc 
kubenswrapper[4881]: I1211 08:26:14.089759 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-cni-netd\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089742 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-run-systemd\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089787 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-run-netns\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089571 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-systemd-units\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089710 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-var-lib-openvswitch\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089795 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-log-socket\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089788 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-slash\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089820 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-ovn-node-metrics-cert\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.089596 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-host-run-ovn-kubernetes\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090141 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" 
(UniqueName: \"kubernetes.io/configmap/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-ovnkube-config\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090158 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-run-openvswitch\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090254 4881 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-cni-bin\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090266 4881 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090275 4881 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-run-netns\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090285 4881 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090294 4881 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090302 4881 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090309 4881 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090317 4881 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090317 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-ovnkube-script-lib\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090326 4881 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090355 4881 reconciler_common.go:293] "Volume detached for volume 
\"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-run-systemd\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090364 4881 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090373 4881 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-host-kubelet\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090380 4881 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-node-log\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090388 4881 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f14cc110-e74f-4cb7-a998-041e3f9b537b-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090397 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rjwc\" (UniqueName: \"kubernetes.io/projected/f14cc110-e74f-4cb7-a998-041e3f9b537b-kube-api-access-8rjwc\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090405 4881 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-log-socket\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090413 4881 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f14cc110-e74f-4cb7-a998-041e3f9b537b-systemd-units\") on node \"crc\" DevicePath \"\"" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.090688 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-env-overrides\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.093393 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-ovn-node-metrics-cert\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.107737 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrlws\" (UniqueName: \"kubernetes.io/projected/613e1b2f-2f51-4f0e-819e-0e1a08392a0d-kube-api-access-xrlws\") pod \"ovnkube-node-jn7gc\" (UID: \"613e1b2f-2f51-4f0e-819e-0e1a08392a0d\") " pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.218125 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.412457 4881 generic.go:334] "Generic (PLEG): container finished" podID="613e1b2f-2f51-4f0e-819e-0e1a08392a0d" containerID="c378972577b935c6f2ff58ce71f4f6fb2c86168834355623d838279751f0d2a0" exitCode=0 Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.412555 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" event={"ID":"613e1b2f-2f51-4f0e-819e-0e1a08392a0d","Type":"ContainerDied","Data":"c378972577b935c6f2ff58ce71f4f6fb2c86168834355623d838279751f0d2a0"} Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.412862 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" event={"ID":"613e1b2f-2f51-4f0e-819e-0e1a08392a0d","Type":"ContainerStarted","Data":"afe9cb616263d0033ac9ce6c14e618cc3b761c0afd3c537e3d7c3a70f2e4ee1f"} Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.419510 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovn-acl-logging/0.log" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.420001 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wf8q8_f14cc110-e74f-4cb7-a998-041e3f9b537b/ovn-controller/0.log" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.420645 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" event={"ID":"f14cc110-e74f-4cb7-a998-041e3f9b537b","Type":"ContainerDied","Data":"8df2e5f01cc186fb359a8d0d3221245c1f2782daa8511af918f436152f91dc1a"} Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.420682 4881 scope.go:117] "RemoveContainer" containerID="948f85b80bf82b9419418419a5b8d071a585911d9e4ac3cd9122fe83ee5836e9" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.420821 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wf8q8" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.434273 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g8jhd_368e635e-0e63-4202-b9e4-4a3a85c6f30c/kube-multus/2.log" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.455959 4881 scope.go:117] "RemoveContainer" containerID="88bd335385bc696076b9b2719bfbd27b294003397c545f24ee2c50cd854fcc28" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.469310 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-wf8q8"] Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.480756 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-wf8q8"] Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.507299 4881 scope.go:117] "RemoveContainer" containerID="c23a70dbe6b32be7c8a0c3799f7dd2323f0dcc86d7d58ee3e140cda4ffbb03f0" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.532104 4881 scope.go:117] "RemoveContainer" containerID="6d8574de8e2431b2aca7954b9a0498e353c00d982b4384c1f198b2099fe527a6" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.545486 4881 scope.go:117] "RemoveContainer" containerID="3715d1eacbf7a843b7a1a0e83dee159ddbe5bf62812e94bf5f74f4677da2617b" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.585521 4881 scope.go:117] "RemoveContainer" containerID="3fece2162ea78bf051c94f0f8adb0b194d05e5d3c5c25cb4e3674096d1204079" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.611817 4881 scope.go:117] "RemoveContainer" containerID="5a0cb039f4c38da8b633dfcd2e0d23afbc3607b41f04d3c230aee5f86cba79b5" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.632885 4881 scope.go:117] "RemoveContainer" containerID="5b8e9dda68edf7ec8848da4eb851807ad0dc10a086fb6129e3a22f9a35e368ea" Dec 11 08:26:14 crc kubenswrapper[4881]: I1211 08:26:14.655668 4881 scope.go:117] "RemoveContainer" containerID="1794069e7175bc0ce9dc315807ad766fd28fb46e11ed6e7addc3199cccaea63a" Dec 11 08:26:15 crc kubenswrapper[4881]: I1211 08:26:15.022829 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f14cc110-e74f-4cb7-a998-041e3f9b537b" path="/var/lib/kubelet/pods/f14cc110-e74f-4cb7-a998-041e3f9b537b/volumes" Dec 11 08:26:15 crc kubenswrapper[4881]: I1211 08:26:15.445120 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" event={"ID":"613e1b2f-2f51-4f0e-819e-0e1a08392a0d","Type":"ContainerStarted","Data":"cb7a021c507f41c49ef818712855ee2af02d52e09f2618d0ba65de6beee1f373"} Dec 11 08:26:15 crc kubenswrapper[4881]: I1211 08:26:15.445213 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" event={"ID":"613e1b2f-2f51-4f0e-819e-0e1a08392a0d","Type":"ContainerStarted","Data":"f8c5d1a4b05db9ffb4938e65b04573741935d6b0d63ee855fe9e3cdbbc3a96cf"} Dec 11 08:26:16 crc kubenswrapper[4881]: I1211 08:26:16.452029 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" event={"ID":"613e1b2f-2f51-4f0e-819e-0e1a08392a0d","Type":"ContainerStarted","Data":"54b5c81a44333c75864d7df3d5b24ec5880082457885952b23091228923efb7b"} Dec 11 08:26:16 crc kubenswrapper[4881]: I1211 08:26:16.452309 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" 
event={"ID":"613e1b2f-2f51-4f0e-819e-0e1a08392a0d","Type":"ContainerStarted","Data":"1eeaadbaba863cf1dab670b8d040c83eb7536ec2feec957191f0a24dc11aba8d"} Dec 11 08:26:16 crc kubenswrapper[4881]: I1211 08:26:16.452320 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" event={"ID":"613e1b2f-2f51-4f0e-819e-0e1a08392a0d","Type":"ContainerStarted","Data":"3c3ce20b396bc32a46213cd9799a0ffab32b327840d8ddf25ddea696e24db6c5"} Dec 11 08:26:18 crc kubenswrapper[4881]: I1211 08:26:18.465209 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" event={"ID":"613e1b2f-2f51-4f0e-819e-0e1a08392a0d","Type":"ContainerStarted","Data":"46e42fb591ae14234e5bc45c24e2d12bc78e7b9bf016fdf9c7389e4c4ea533d7"} Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.496968 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf"] Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.497997 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.499721 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.499894 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.502388 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-t59st" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.593623 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2rbd\" (UniqueName: \"kubernetes.io/projected/518448cb-2c51-4398-a75a-3d2c0d26905e-kube-api-access-n2rbd\") pod \"obo-prometheus-operator-668cf9dfbb-qwshf\" (UID: \"518448cb-2c51-4398-a75a-3d2c0d26905e\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.619632 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89"] Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.621240 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.623585 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.623619 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-xf7cj" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.624572 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7"] Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.625274 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.695207 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2rbd\" (UniqueName: \"kubernetes.io/projected/518448cb-2c51-4398-a75a-3d2c0d26905e-kube-api-access-n2rbd\") pod \"obo-prometheus-operator-668cf9dfbb-qwshf\" (UID: \"518448cb-2c51-4398-a75a-3d2c0d26905e\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.718266 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2rbd\" (UniqueName: \"kubernetes.io/projected/518448cb-2c51-4398-a75a-3d2c0d26905e-kube-api-access-n2rbd\") pod \"obo-prometheus-operator-668cf9dfbb-qwshf\" (UID: \"518448cb-2c51-4398-a75a-3d2c0d26905e\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.722948 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-zdvsr"] Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.723673 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.725312 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-wrkfb" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.725355 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.796882 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/91763e1b-8187-4c07-be69-34a7330afb73-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-77767bb68c-n4z89\" (UID: \"91763e1b-8187-4c07-be69-34a7330afb73\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.796940 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6e8d1991-85d6-4f79-8233-98a8c9be9b32-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7\" (UID: \"6e8d1991-85d6-4f79-8233-98a8c9be9b32\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.797015 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/91763e1b-8187-4c07-be69-34a7330afb73-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-77767bb68c-n4z89\" (UID: \"91763e1b-8187-4c07-be69-34a7330afb73\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.797046 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6e8d1991-85d6-4f79-8233-98a8c9be9b32-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7\" (UID: \"6e8d1991-85d6-4f79-8233-98a8c9be9b32\") " 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.814668 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.830045 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-clg8t"] Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.831001 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-clg8t" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.836184 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-7zmn6" Dec 11 08:26:20 crc kubenswrapper[4881]: E1211 08:26:20.851681 4881 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators_518448cb-2c51-4398-a75a-3d2c0d26905e_0(38d4ef6ad8f85afa6e11ec22acbfb40e42d4e133b0f4537b2f8501fbc4c3d832): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 11 08:26:20 crc kubenswrapper[4881]: E1211 08:26:20.851770 4881 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators_518448cb-2c51-4398-a75a-3d2c0d26905e_0(38d4ef6ad8f85afa6e11ec22acbfb40e42d4e133b0f4537b2f8501fbc4c3d832): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf" Dec 11 08:26:20 crc kubenswrapper[4881]: E1211 08:26:20.851795 4881 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators_518448cb-2c51-4398-a75a-3d2c0d26905e_0(38d4ef6ad8f85afa6e11ec22acbfb40e42d4e133b0f4537b2f8501fbc4c3d832): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf" Dec 11 08:26:20 crc kubenswrapper[4881]: E1211 08:26:20.851862 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators(518448cb-2c51-4398-a75a-3d2c0d26905e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators(518448cb-2c51-4398-a75a-3d2c0d26905e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators_518448cb-2c51-4398-a75a-3d2c0d26905e_0(38d4ef6ad8f85afa6e11ec22acbfb40e42d4e133b0f4537b2f8501fbc4c3d832): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf" podUID="518448cb-2c51-4398-a75a-3d2c0d26905e" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.898016 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/578f637c-c2d8-46be-9838-f2a0b587b0c6-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-zdvsr\" (UID: \"578f637c-c2d8-46be-9838-f2a0b587b0c6\") " pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.898085 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/91763e1b-8187-4c07-be69-34a7330afb73-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-77767bb68c-n4z89\" (UID: \"91763e1b-8187-4c07-be69-34a7330afb73\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.898115 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6e8d1991-85d6-4f79-8233-98a8c9be9b32-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7\" (UID: \"6e8d1991-85d6-4f79-8233-98a8c9be9b32\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.898154 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/91763e1b-8187-4c07-be69-34a7330afb73-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-77767bb68c-n4z89\" (UID: \"91763e1b-8187-4c07-be69-34a7330afb73\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.898177 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6e8d1991-85d6-4f79-8233-98a8c9be9b32-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7\" (UID: \"6e8d1991-85d6-4f79-8233-98a8c9be9b32\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.898199 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9sbz\" (UniqueName: \"kubernetes.io/projected/578f637c-c2d8-46be-9838-f2a0b587b0c6-kube-api-access-t9sbz\") pod \"observability-operator-d8bb48f5d-zdvsr\" (UID: \"578f637c-c2d8-46be-9838-f2a0b587b0c6\") " pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.901834 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/91763e1b-8187-4c07-be69-34a7330afb73-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-77767bb68c-n4z89\" (UID: \"91763e1b-8187-4c07-be69-34a7330afb73\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.901952 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/91763e1b-8187-4c07-be69-34a7330afb73-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-77767bb68c-n4z89\" (UID: \"91763e1b-8187-4c07-be69-34a7330afb73\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.905854 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6e8d1991-85d6-4f79-8233-98a8c9be9b32-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7\" (UID: \"6e8d1991-85d6-4f79-8233-98a8c9be9b32\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.906175 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6e8d1991-85d6-4f79-8233-98a8c9be9b32-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7\" (UID: \"6e8d1991-85d6-4f79-8233-98a8c9be9b32\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.941264 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89" Dec 11 08:26:20 crc kubenswrapper[4881]: I1211 08:26:20.949518 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" Dec 11 08:26:20 crc kubenswrapper[4881]: E1211 08:26:20.966229 4881 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators_91763e1b-8187-4c07-be69-34a7330afb73_0(2757390091e5858860e5c0ce46cc976e8f43767fd753f4e7021f5b1153371b47): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 11 08:26:20 crc kubenswrapper[4881]: E1211 08:26:20.966324 4881 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators_91763e1b-8187-4c07-be69-34a7330afb73_0(2757390091e5858860e5c0ce46cc976e8f43767fd753f4e7021f5b1153371b47): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89" Dec 11 08:26:20 crc kubenswrapper[4881]: E1211 08:26:20.966367 4881 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators_91763e1b-8187-4c07-be69-34a7330afb73_0(2757390091e5858860e5c0ce46cc976e8f43767fd753f4e7021f5b1153371b47): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89" Dec 11 08:26:20 crc kubenswrapper[4881]: E1211 08:26:20.966419 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators(91763e1b-8187-4c07-be69-34a7330afb73)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators(91763e1b-8187-4c07-be69-34a7330afb73)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators_91763e1b-8187-4c07-be69-34a7330afb73_0(2757390091e5858860e5c0ce46cc976e8f43767fd753f4e7021f5b1153371b47): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89" podUID="91763e1b-8187-4c07-be69-34a7330afb73" Dec 11 08:26:20 crc kubenswrapper[4881]: E1211 08:26:20.981039 4881 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators_6e8d1991-85d6-4f79-8233-98a8c9be9b32_0(aefcbec10dac17f5a8f2c67711cf95fa78b9830be4d64e4af44168fb71dae23e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 11 08:26:20 crc kubenswrapper[4881]: E1211 08:26:20.981114 4881 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators_6e8d1991-85d6-4f79-8233-98a8c9be9b32_0(aefcbec10dac17f5a8f2c67711cf95fa78b9830be4d64e4af44168fb71dae23e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" Dec 11 08:26:20 crc kubenswrapper[4881]: E1211 08:26:20.981135 4881 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators_6e8d1991-85d6-4f79-8233-98a8c9be9b32_0(aefcbec10dac17f5a8f2c67711cf95fa78b9830be4d64e4af44168fb71dae23e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" Dec 11 08:26:20 crc kubenswrapper[4881]: E1211 08:26:20.981184 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators(6e8d1991-85d6-4f79-8233-98a8c9be9b32)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators(6e8d1991-85d6-4f79-8233-98a8c9be9b32)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators_6e8d1991-85d6-4f79-8233-98a8c9be9b32_0(aefcbec10dac17f5a8f2c67711cf95fa78b9830be4d64e4af44168fb71dae23e): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" podUID="6e8d1991-85d6-4f79-8233-98a8c9be9b32" Dec 11 08:26:21 crc kubenswrapper[4881]: I1211 08:26:20.999121 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/578f637c-c2d8-46be-9838-f2a0b587b0c6-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-zdvsr\" (UID: \"578f637c-c2d8-46be-9838-f2a0b587b0c6\") " pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr" Dec 11 08:26:21 crc kubenswrapper[4881]: I1211 08:26:20.999240 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9sbz\" (UniqueName: \"kubernetes.io/projected/578f637c-c2d8-46be-9838-f2a0b587b0c6-kube-api-access-t9sbz\") pod \"observability-operator-d8bb48f5d-zdvsr\" (UID: \"578f637c-c2d8-46be-9838-f2a0b587b0c6\") " pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr" Dec 11 08:26:21 crc kubenswrapper[4881]: I1211 08:26:20.999264 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hqmn\" (UniqueName: \"kubernetes.io/projected/8df2f7b3-931a-4e09-b473-f71d8ee210d8-kube-api-access-8hqmn\") pod \"perses-operator-5446b9c989-clg8t\" (UID: \"8df2f7b3-931a-4e09-b473-f71d8ee210d8\") " pod="openshift-operators/perses-operator-5446b9c989-clg8t" Dec 11 08:26:21 crc kubenswrapper[4881]: I1211 08:26:20.999288 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/8df2f7b3-931a-4e09-b473-f71d8ee210d8-openshift-service-ca\") pod \"perses-operator-5446b9c989-clg8t\" (UID: \"8df2f7b3-931a-4e09-b473-f71d8ee210d8\") " pod="openshift-operators/perses-operator-5446b9c989-clg8t" Dec 11 08:26:21 crc kubenswrapper[4881]: I1211 08:26:21.003793 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/578f637c-c2d8-46be-9838-f2a0b587b0c6-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-zdvsr\" (UID: \"578f637c-c2d8-46be-9838-f2a0b587b0c6\") " pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr" Dec 11 08:26:21 crc kubenswrapper[4881]: I1211 08:26:21.015880 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9sbz\" (UniqueName: \"kubernetes.io/projected/578f637c-c2d8-46be-9838-f2a0b587b0c6-kube-api-access-t9sbz\") pod \"observability-operator-d8bb48f5d-zdvsr\" (UID: \"578f637c-c2d8-46be-9838-f2a0b587b0c6\") " pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr" Dec 11 08:26:21 crc kubenswrapper[4881]: I1211 08:26:21.048863 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr" Dec 11 08:26:21 crc kubenswrapper[4881]: E1211 08:26:21.069959 4881 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-zdvsr_openshift-operators_578f637c-c2d8-46be-9838-f2a0b587b0c6_0(79ae3aa3e6d2b57cef7cf3eb02e3bf50538ed5bacd2827897336f676127b6a32): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Dec 11 08:26:21 crc kubenswrapper[4881]: E1211 08:26:21.070026 4881 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-zdvsr_openshift-operators_578f637c-c2d8-46be-9838-f2a0b587b0c6_0(79ae3aa3e6d2b57cef7cf3eb02e3bf50538ed5bacd2827897336f676127b6a32): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr"
Dec 11 08:26:21 crc kubenswrapper[4881]: E1211 08:26:21.070048 4881 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-zdvsr_openshift-operators_578f637c-c2d8-46be-9838-f2a0b587b0c6_0(79ae3aa3e6d2b57cef7cf3eb02e3bf50538ed5bacd2827897336f676127b6a32): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr"
Dec 11 08:26:21 crc kubenswrapper[4881]: E1211 08:26:21.070091 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-zdvsr_openshift-operators(578f637c-c2d8-46be-9838-f2a0b587b0c6)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-zdvsr_openshift-operators(578f637c-c2d8-46be-9838-f2a0b587b0c6)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-zdvsr_openshift-operators_578f637c-c2d8-46be-9838-f2a0b587b0c6_0(79ae3aa3e6d2b57cef7cf3eb02e3bf50538ed5bacd2827897336f676127b6a32): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr" podUID="578f637c-c2d8-46be-9838-f2a0b587b0c6"
Dec 11 08:26:21 crc kubenswrapper[4881]: I1211 08:26:21.100590 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hqmn\" (UniqueName: \"kubernetes.io/projected/8df2f7b3-931a-4e09-b473-f71d8ee210d8-kube-api-access-8hqmn\") pod \"perses-operator-5446b9c989-clg8t\" (UID: \"8df2f7b3-931a-4e09-b473-f71d8ee210d8\") " pod="openshift-operators/perses-operator-5446b9c989-clg8t"
Dec 11 08:26:21 crc kubenswrapper[4881]: I1211 08:26:21.100642 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/8df2f7b3-931a-4e09-b473-f71d8ee210d8-openshift-service-ca\") pod \"perses-operator-5446b9c989-clg8t\" (UID: \"8df2f7b3-931a-4e09-b473-f71d8ee210d8\") " pod="openshift-operators/perses-operator-5446b9c989-clg8t"
Dec 11 08:26:21 crc kubenswrapper[4881]: I1211 08:26:21.101479 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/8df2f7b3-931a-4e09-b473-f71d8ee210d8-openshift-service-ca\") pod \"perses-operator-5446b9c989-clg8t\" (UID: \"8df2f7b3-931a-4e09-b473-f71d8ee210d8\") " pod="openshift-operators/perses-operator-5446b9c989-clg8t"
Dec 11 08:26:21 crc kubenswrapper[4881]: I1211 08:26:21.119100 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hqmn\" (UniqueName: \"kubernetes.io/projected/8df2f7b3-931a-4e09-b473-f71d8ee210d8-kube-api-access-8hqmn\") pod \"perses-operator-5446b9c989-clg8t\" (UID: \"8df2f7b3-931a-4e09-b473-f71d8ee210d8\") " pod="openshift-operators/perses-operator-5446b9c989-clg8t"
Dec 11 08:26:21 crc kubenswrapper[4881]: I1211 08:26:21.186014 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-clg8t"
Dec 11 08:26:21 crc kubenswrapper[4881]: E1211 08:26:21.207447 4881 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clg8t_openshift-operators_8df2f7b3-931a-4e09-b473-f71d8ee210d8_0(c7b49880453acaab3615cd1d4d793ac07640c92f8602f5a6f7cdd67fae7da062): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 11 08:26:21 crc kubenswrapper[4881]: E1211 08:26:21.207510 4881 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clg8t_openshift-operators_8df2f7b3-931a-4e09-b473-f71d8ee210d8_0(c7b49880453acaab3615cd1d4d793ac07640c92f8602f5a6f7cdd67fae7da062): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-clg8t"
Dec 11 08:26:21 crc kubenswrapper[4881]: E1211 08:26:21.207535 4881 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clg8t_openshift-operators_8df2f7b3-931a-4e09-b473-f71d8ee210d8_0(c7b49880453acaab3615cd1d4d793ac07640c92f8602f5a6f7cdd67fae7da062): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-clg8t"
Dec 11 08:26:21 crc kubenswrapper[4881]: E1211 08:26:21.207584 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-clg8t_openshift-operators(8df2f7b3-931a-4e09-b473-f71d8ee210d8)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-clg8t_openshift-operators(8df2f7b3-931a-4e09-b473-f71d8ee210d8)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clg8t_openshift-operators_8df2f7b3-931a-4e09-b473-f71d8ee210d8_0(c7b49880453acaab3615cd1d4d793ac07640c92f8602f5a6f7cdd67fae7da062): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-clg8t" podUID="8df2f7b3-931a-4e09-b473-f71d8ee210d8"
Dec 11 08:26:21 crc kubenswrapper[4881]: I1211 08:26:21.486857 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" event={"ID":"613e1b2f-2f51-4f0e-819e-0e1a08392a0d","Type":"ContainerStarted","Data":"f7ffe6dac3d3edf85031eb6402523a5671cce6d44631757b658d265126c6a62d"}
Dec 11 08:26:23 crc kubenswrapper[4881]: I1211 08:26:23.510063 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" event={"ID":"613e1b2f-2f51-4f0e-819e-0e1a08392a0d","Type":"ContainerStarted","Data":"48af1e018bf91ebb83cada1e63ddc8c3d8eb22e6f40d11c4806db340b83d5652"}
Dec 11 08:26:23 crc kubenswrapper[4881]: I1211 08:26:23.510627 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc"
Dec 11 08:26:23 crc kubenswrapper[4881]: I1211 08:26:23.563277 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc" podStartSLOduration=10.563259819 podStartE2EDuration="10.563259819s" podCreationTimestamp="2025-12-11 08:26:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:26:23.561866935 +0000 UTC m=+631.939235642" watchObservedRunningTime="2025-12-11 08:26:23.563259819 +0000 UTC m=+631.940628516"
Dec 11 08:26:23 crc kubenswrapper[4881]: I1211 08:26:23.585762 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc"
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.219862 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc"
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.254727 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc"
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.515973 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc"
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.554391 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89"]
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.554507 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89"
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.554921 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89"
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.560754 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7"]
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.560826 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7"
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.561071 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.590214 4881 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators_91763e1b-8187-4c07-be69-34a7330afb73_0(65a3414a125fb8e911814327f29679467fd1186aa619a1dce46f5bf4f706d444): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.590278 4881 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators_91763e1b-8187-4c07-be69-34a7330afb73_0(65a3414a125fb8e911814327f29679467fd1186aa619a1dce46f5bf4f706d444): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.590299 4881 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators_91763e1b-8187-4c07-be69-34a7330afb73_0(65a3414a125fb8e911814327f29679467fd1186aa619a1dce46f5bf4f706d444): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.590389 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators(91763e1b-8187-4c07-be69-34a7330afb73)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators(91763e1b-8187-4c07-be69-34a7330afb73)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators_91763e1b-8187-4c07-be69-34a7330afb73_0(65a3414a125fb8e911814327f29679467fd1186aa619a1dce46f5bf4f706d444): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89" podUID="91763e1b-8187-4c07-be69-34a7330afb73"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.594876 4881 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators_6e8d1991-85d6-4f79-8233-98a8c9be9b32_0(5fa1d4dc5ceeb683486b3574c1efbd41ac1491f9f57bc71d06b30a1a75689236): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.594920 4881 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators_6e8d1991-85d6-4f79-8233-98a8c9be9b32_0(5fa1d4dc5ceeb683486b3574c1efbd41ac1491f9f57bc71d06b30a1a75689236): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.594938 4881 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators_6e8d1991-85d6-4f79-8233-98a8c9be9b32_0(5fa1d4dc5ceeb683486b3574c1efbd41ac1491f9f57bc71d06b30a1a75689236): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.594980 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators(6e8d1991-85d6-4f79-8233-98a8c9be9b32)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators(6e8d1991-85d6-4f79-8233-98a8c9be9b32)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators_6e8d1991-85d6-4f79-8233-98a8c9be9b32_0(5fa1d4dc5ceeb683486b3574c1efbd41ac1491f9f57bc71d06b30a1a75689236): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" podUID="6e8d1991-85d6-4f79-8233-98a8c9be9b32"
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.602017 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-clg8t"]
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.602399 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-clg8t"
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.603104 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-clg8t"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.649400 4881 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clg8t_openshift-operators_8df2f7b3-931a-4e09-b473-f71d8ee210d8_0(f6fc073d4918b952c2bd6b4016f47eae57ce66fb1aeb02f011413214a7d6b14a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.649459 4881 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clg8t_openshift-operators_8df2f7b3-931a-4e09-b473-f71d8ee210d8_0(f6fc073d4918b952c2bd6b4016f47eae57ce66fb1aeb02f011413214a7d6b14a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-clg8t"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.649484 4881 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clg8t_openshift-operators_8df2f7b3-931a-4e09-b473-f71d8ee210d8_0(f6fc073d4918b952c2bd6b4016f47eae57ce66fb1aeb02f011413214a7d6b14a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-clg8t"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.649532 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-clg8t_openshift-operators(8df2f7b3-931a-4e09-b473-f71d8ee210d8)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-clg8t_openshift-operators(8df2f7b3-931a-4e09-b473-f71d8ee210d8)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clg8t_openshift-operators_8df2f7b3-931a-4e09-b473-f71d8ee210d8_0(f6fc073d4918b952c2bd6b4016f47eae57ce66fb1aeb02f011413214a7d6b14a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-clg8t" podUID="8df2f7b3-931a-4e09-b473-f71d8ee210d8"
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.738623 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-zdvsr"]
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.738732 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr"
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.739117 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr"
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.744897 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf"]
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.745038 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf"
Dec 11 08:26:24 crc kubenswrapper[4881]: I1211 08:26:24.745548 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.766255 4881 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-zdvsr_openshift-operators_578f637c-c2d8-46be-9838-f2a0b587b0c6_0(0b7b9e757b4c6412ef675ef8583d0033ef7d8038a624cf20c6f987fef09532f9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.766349 4881 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-zdvsr_openshift-operators_578f637c-c2d8-46be-9838-f2a0b587b0c6_0(0b7b9e757b4c6412ef675ef8583d0033ef7d8038a624cf20c6f987fef09532f9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.766373 4881 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-zdvsr_openshift-operators_578f637c-c2d8-46be-9838-f2a0b587b0c6_0(0b7b9e757b4c6412ef675ef8583d0033ef7d8038a624cf20c6f987fef09532f9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.766417 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-zdvsr_openshift-operators(578f637c-c2d8-46be-9838-f2a0b587b0c6)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-zdvsr_openshift-operators(578f637c-c2d8-46be-9838-f2a0b587b0c6)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-zdvsr_openshift-operators_578f637c-c2d8-46be-9838-f2a0b587b0c6_0(0b7b9e757b4c6412ef675ef8583d0033ef7d8038a624cf20c6f987fef09532f9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr" podUID="578f637c-c2d8-46be-9838-f2a0b587b0c6"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.779508 4881 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators_518448cb-2c51-4398-a75a-3d2c0d26905e_0(59635da2630a4d134097381a49896eda2e54d4e191cd2f1ec393ba322f802e72): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.779573 4881 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators_518448cb-2c51-4398-a75a-3d2c0d26905e_0(59635da2630a4d134097381a49896eda2e54d4e191cd2f1ec393ba322f802e72): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.779595 4881 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators_518448cb-2c51-4398-a75a-3d2c0d26905e_0(59635da2630a4d134097381a49896eda2e54d4e191cd2f1ec393ba322f802e72): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf"
Dec 11 08:26:24 crc kubenswrapper[4881]: E1211 08:26:24.779639 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators(518448cb-2c51-4398-a75a-3d2c0d26905e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators(518448cb-2c51-4398-a75a-3d2c0d26905e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators_518448cb-2c51-4398-a75a-3d2c0d26905e_0(59635da2630a4d134097381a49896eda2e54d4e191cd2f1ec393ba322f802e72): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf" podUID="518448cb-2c51-4398-a75a-3d2c0d26905e"
Dec 11 08:26:29 crc kubenswrapper[4881]: I1211 08:26:29.005367 4881 scope.go:117] "RemoveContainer" containerID="021945edb3416828d6a387f2de7474bbde198cdef1eb1a9aea5de0cd3699a72a"
Dec 11 08:26:29 crc kubenswrapper[4881]: E1211 08:26:29.005728 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-g8jhd_openshift-multus(368e635e-0e63-4202-b9e4-4a3a85c6f30c)\"" pod="openshift-multus/multus-g8jhd" podUID="368e635e-0e63-4202-b9e4-4a3a85c6f30c"
Dec 11 08:26:29 crc kubenswrapper[4881]: I1211 08:26:29.396983 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 08:26:29 crc kubenswrapper[4881]: I1211 08:26:29.397090 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 08:26:29 crc kubenswrapper[4881]: I1211 08:26:29.397131 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh"
Dec 11 08:26:29 crc kubenswrapper[4881]: I1211 08:26:29.397766 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2378020365a7ffc5afa00424ace5b73c56a13d69da3d6d17d8336f551688833d"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 11 08:26:29 crc kubenswrapper[4881]: I1211 08:26:29.397835 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://2378020365a7ffc5afa00424ace5b73c56a13d69da3d6d17d8336f551688833d" gracePeriod=600
Dec 11 08:26:30 crc kubenswrapper[4881]: I1211 08:26:30.549558 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="2378020365a7ffc5afa00424ace5b73c56a13d69da3d6d17d8336f551688833d" exitCode=0
Dec 11 08:26:30 crc kubenswrapper[4881]: I1211 08:26:30.549605 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"2378020365a7ffc5afa00424ace5b73c56a13d69da3d6d17d8336f551688833d"}
Dec 11 08:26:30 crc kubenswrapper[4881]: I1211 08:26:30.549635 4881 scope.go:117] "RemoveContainer" containerID="d7ca78f14154a7019c587a5f29a4086b385a480f4f657464057274ffa0a054ec"
Dec 11 08:26:31 crc kubenswrapper[4881]: I1211 08:26:31.562375 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"77814b1ae10c46f437c76c46bb8c5b9eb3fd105add11c725ab5d0afd4052b630"}
Dec 11 08:26:36 crc kubenswrapper[4881]: I1211 08:26:36.005401 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf"
Dec 11 08:26:36 crc kubenswrapper[4881]: I1211 08:26:36.005429 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89"
Dec 11 08:26:36 crc kubenswrapper[4881]: I1211 08:26:36.006185 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89"
Dec 11 08:26:36 crc kubenswrapper[4881]: I1211 08:26:36.006567 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf"
Dec 11 08:26:36 crc kubenswrapper[4881]: E1211 08:26:36.068171 4881 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators_518448cb-2c51-4398-a75a-3d2c0d26905e_0(3dca24542f6d340a20f629ce88eb142ce5a7d80e3d40528281bcd3954191ed67): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 11 08:26:36 crc kubenswrapper[4881]: E1211 08:26:36.068236 4881 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators_518448cb-2c51-4398-a75a-3d2c0d26905e_0(3dca24542f6d340a20f629ce88eb142ce5a7d80e3d40528281bcd3954191ed67): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf"
Dec 11 08:26:36 crc kubenswrapper[4881]: E1211 08:26:36.068255 4881 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators_518448cb-2c51-4398-a75a-3d2c0d26905e_0(3dca24542f6d340a20f629ce88eb142ce5a7d80e3d40528281bcd3954191ed67): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf"
Dec 11 08:26:36 crc kubenswrapper[4881]: E1211 08:26:36.068301 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators(518448cb-2c51-4398-a75a-3d2c0d26905e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators(518448cb-2c51-4398-a75a-3d2c0d26905e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-qwshf_openshift-operators_518448cb-2c51-4398-a75a-3d2c0d26905e_0(3dca24542f6d340a20f629ce88eb142ce5a7d80e3d40528281bcd3954191ed67): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf" podUID="518448cb-2c51-4398-a75a-3d2c0d26905e"
Dec 11 08:26:36 crc kubenswrapper[4881]: E1211 08:26:36.073194 4881 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators_91763e1b-8187-4c07-be69-34a7330afb73_0(2eae9bf44499eae6aaa99b1280954594266660cec50ec32c81e0338794df6908): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 11 08:26:36 crc kubenswrapper[4881]: E1211 08:26:36.073284 4881 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators_91763e1b-8187-4c07-be69-34a7330afb73_0(2eae9bf44499eae6aaa99b1280954594266660cec50ec32c81e0338794df6908): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89"
Dec 11 08:26:36 crc kubenswrapper[4881]: E1211 08:26:36.073380 4881 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators_91763e1b-8187-4c07-be69-34a7330afb73_0(2eae9bf44499eae6aaa99b1280954594266660cec50ec32c81e0338794df6908): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89"
Dec 11 08:26:36 crc kubenswrapper[4881]: E1211 08:26:36.073466 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators(91763e1b-8187-4c07-be69-34a7330afb73)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators(91763e1b-8187-4c07-be69-34a7330afb73)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_openshift-operators_91763e1b-8187-4c07-be69-34a7330afb73_0(2eae9bf44499eae6aaa99b1280954594266660cec50ec32c81e0338794df6908): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89" podUID="91763e1b-8187-4c07-be69-34a7330afb73"
Dec 11 08:26:37 crc kubenswrapper[4881]: I1211 08:26:37.004823 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-clg8t"
Dec 11 08:26:37 crc kubenswrapper[4881]: I1211 08:26:37.005392 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-clg8t"
Dec 11 08:26:37 crc kubenswrapper[4881]: E1211 08:26:37.047018 4881 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clg8t_openshift-operators_8df2f7b3-931a-4e09-b473-f71d8ee210d8_0(fa320e4d63e56327767699c4ce64d737e4797193060c8d87ed4dbbb79f6985bb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 11 08:26:37 crc kubenswrapper[4881]: E1211 08:26:37.047092 4881 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clg8t_openshift-operators_8df2f7b3-931a-4e09-b473-f71d8ee210d8_0(fa320e4d63e56327767699c4ce64d737e4797193060c8d87ed4dbbb79f6985bb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-clg8t"
Dec 11 08:26:37 crc kubenswrapper[4881]: E1211 08:26:37.047118 4881 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clg8t_openshift-operators_8df2f7b3-931a-4e09-b473-f71d8ee210d8_0(fa320e4d63e56327767699c4ce64d737e4797193060c8d87ed4dbbb79f6985bb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-clg8t"
Dec 11 08:26:37 crc kubenswrapper[4881]: E1211 08:26:37.047172 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-clg8t_openshift-operators(8df2f7b3-931a-4e09-b473-f71d8ee210d8)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-clg8t_openshift-operators(8df2f7b3-931a-4e09-b473-f71d8ee210d8)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clg8t_openshift-operators_8df2f7b3-931a-4e09-b473-f71d8ee210d8_0(fa320e4d63e56327767699c4ce64d737e4797193060c8d87ed4dbbb79f6985bb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-clg8t" podUID="8df2f7b3-931a-4e09-b473-f71d8ee210d8"
Dec 11 08:26:38 crc kubenswrapper[4881]: I1211 08:26:38.005100 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7"
Dec 11 08:26:38 crc kubenswrapper[4881]: I1211 08:26:38.005159 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr"
Dec 11 08:26:38 crc kubenswrapper[4881]: I1211 08:26:38.005660 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7"
Dec 11 08:26:38 crc kubenswrapper[4881]: I1211 08:26:38.005767 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr"
Dec 11 08:26:38 crc kubenswrapper[4881]: E1211 08:26:38.034946 4881 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-zdvsr_openshift-operators_578f637c-c2d8-46be-9838-f2a0b587b0c6_0(7f69de3cdde0ecb1e064d694898a925e86e1877563b19d4c2fb1cb2e29a341d4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 11 08:26:38 crc kubenswrapper[4881]: E1211 08:26:38.035037 4881 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-zdvsr_openshift-operators_578f637c-c2d8-46be-9838-f2a0b587b0c6_0(7f69de3cdde0ecb1e064d694898a925e86e1877563b19d4c2fb1cb2e29a341d4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr"
Dec 11 08:26:38 crc kubenswrapper[4881]: E1211 08:26:38.035072 4881 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-zdvsr_openshift-operators_578f637c-c2d8-46be-9838-f2a0b587b0c6_0(7f69de3cdde0ecb1e064d694898a925e86e1877563b19d4c2fb1cb2e29a341d4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr"
Dec 11 08:26:38 crc kubenswrapper[4881]: E1211 08:26:38.035129 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-zdvsr_openshift-operators(578f637c-c2d8-46be-9838-f2a0b587b0c6)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-zdvsr_openshift-operators(578f637c-c2d8-46be-9838-f2a0b587b0c6)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-zdvsr_openshift-operators_578f637c-c2d8-46be-9838-f2a0b587b0c6_0(7f69de3cdde0ecb1e064d694898a925e86e1877563b19d4c2fb1cb2e29a341d4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr" podUID="578f637c-c2d8-46be-9838-f2a0b587b0c6"
Dec 11 08:26:38 crc kubenswrapper[4881]: E1211 08:26:38.040396 4881 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators_6e8d1991-85d6-4f79-8233-98a8c9be9b32_0(7afb5b1d4277454040037a2a075d7e07609dc57bd0aa28827572010dc778043d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 11 08:26:38 crc kubenswrapper[4881]: E1211 08:26:38.040427 4881 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators_6e8d1991-85d6-4f79-8233-98a8c9be9b32_0(7afb5b1d4277454040037a2a075d7e07609dc57bd0aa28827572010dc778043d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7"
Dec 11 08:26:38 crc kubenswrapper[4881]: E1211 08:26:38.040444 4881 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators_6e8d1991-85d6-4f79-8233-98a8c9be9b32_0(7afb5b1d4277454040037a2a075d7e07609dc57bd0aa28827572010dc778043d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7"
Dec 11 08:26:38 crc kubenswrapper[4881]: E1211 08:26:38.040483 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators(6e8d1991-85d6-4f79-8233-98a8c9be9b32)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators(6e8d1991-85d6-4f79-8233-98a8c9be9b32)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators_6e8d1991-85d6-4f79-8233-98a8c9be9b32_0(7afb5b1d4277454040037a2a075d7e07609dc57bd0aa28827572010dc778043d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" podUID="6e8d1991-85d6-4f79-8233-98a8c9be9b32"
Dec 11 08:26:42 crc kubenswrapper[4881]: I1211 08:26:42.005309 4881 scope.go:117] "RemoveContainer" containerID="021945edb3416828d6a387f2de7474bbde198cdef1eb1a9aea5de0cd3699a72a"
Dec 11 08:26:43 crc kubenswrapper[4881]: I1211 08:26:43.642049 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g8jhd_368e635e-0e63-4202-b9e4-4a3a85c6f30c/kube-multus/2.log"
Dec 11 08:26:43 crc kubenswrapper[4881]: I1211 08:26:43.642448 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-g8jhd" event={"ID":"368e635e-0e63-4202-b9e4-4a3a85c6f30c","Type":"ContainerStarted","Data":"87098c266545303049d9a876ba7396791674d24878b7c6929dcb47ea413d62c4"}
Dec 11 08:26:44 crc kubenswrapper[4881]: I1211 08:26:44.249757 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-jn7gc"
Dec 11 08:26:49 crc kubenswrapper[4881]: I1211 08:26:49.004964 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-clg8t"
Dec 11 08:26:49 crc kubenswrapper[4881]: I1211 08:26:49.005097 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf"
Dec 11 08:26:49 crc kubenswrapper[4881]: I1211 08:26:49.005117 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7"
Dec 11 08:26:49 crc kubenswrapper[4881]: I1211 08:26:49.005918 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-clg8t"
Dec 11 08:26:49 crc kubenswrapper[4881]: I1211 08:26:49.006222 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7"
Dec 11 08:26:49 crc kubenswrapper[4881]: I1211 08:26:49.006223 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf"
Dec 11 08:26:49 crc kubenswrapper[4881]: I1211 08:26:49.345667 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-clg8t"]
Dec 11 08:26:49 crc kubenswrapper[4881]: I1211 08:26:49.681020 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-clg8t" event={"ID":"8df2f7b3-931a-4e09-b473-f71d8ee210d8","Type":"ContainerStarted","Data":"8e00cb3a2985a35c0ed2ec7af6535ea7d962e8ea66860d045761d60164ac7984"}
Dec 11 08:26:49 crc kubenswrapper[4881]: I1211 08:26:49.686101 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf"]
Dec 11 08:26:49 crc kubenswrapper[4881]: W1211 08:26:49.692271 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod518448cb_2c51_4398_a75a_3d2c0d26905e.slice/crio-312ce6199859e1f50e52519371d0ceac5dc5cb8fb6322a6e94e231bd91f60272 WatchSource:0}: Error finding container 312ce6199859e1f50e52519371d0ceac5dc5cb8fb6322a6e94e231bd91f60272: Status 404 returned error can't find the container with id 312ce6199859e1f50e52519371d0ceac5dc5cb8fb6322a6e94e231bd91f60272
Dec 11 08:26:49 crc kubenswrapper[4881]: I1211 08:26:49.704928 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7"]
Dec 11 08:26:49 crc kubenswrapper[4881]: W1211 08:26:49.712799 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6e8d1991_85d6_4f79_8233_98a8c9be9b32.slice/crio-2182f97d5f24955178bd977bfd4ba66822d6575efc2768742aa252113e1c5eb4 WatchSource:0}: Error finding container 2182f97d5f24955178bd977bfd4ba66822d6575efc2768742aa252113e1c5eb4: Status 404 returned error can't find the container with id 2182f97d5f24955178bd977bfd4ba66822d6575efc2768742aa252113e1c5eb4
Dec 11 08:26:50 crc kubenswrapper[4881]: I1211 08:26:50.004932 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89"
Dec 11 08:26:50 crc kubenswrapper[4881]: I1211 08:26:50.005445 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89"
Dec 11 08:26:50 crc kubenswrapper[4881]: I1211 08:26:50.688290 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf" event={"ID":"518448cb-2c51-4398-a75a-3d2c0d26905e","Type":"ContainerStarted","Data":"312ce6199859e1f50e52519371d0ceac5dc5cb8fb6322a6e94e231bd91f60272"}
Dec 11 08:26:50 crc kubenswrapper[4881]: I1211 08:26:50.689563 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" event={"ID":"6e8d1991-85d6-4f79-8233-98a8c9be9b32","Type":"ContainerStarted","Data":"2182f97d5f24955178bd977bfd4ba66822d6575efc2768742aa252113e1c5eb4"}
Dec 11 08:26:50 crc kubenswrapper[4881]: I1211 08:26:50.855679 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89"]
Dec 11 08:26:50 crc kubenswrapper[4881]: W1211 08:26:50.878702 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91763e1b_8187_4c07_be69_34a7330afb73.slice/crio-56e80c67a28a5b6b3afcca32dc94ea2f0f8e57160fd281ebbd1671efd3ca4d09 WatchSource:0}: Error finding container 56e80c67a28a5b6b3afcca32dc94ea2f0f8e57160fd281ebbd1671efd3ca4d09: Status 404 returned error can't find the container with id 56e80c67a28a5b6b3afcca32dc94ea2f0f8e57160fd281ebbd1671efd3ca4d09
Dec 11 08:26:51 crc kubenswrapper[4881]: I1211 08:26:51.696359 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89" event={"ID":"91763e1b-8187-4c07-be69-34a7330afb73","Type":"ContainerStarted","Data":"56e80c67a28a5b6b3afcca32dc94ea2f0f8e57160fd281ebbd1671efd3ca4d09"}
Dec 11 08:26:52 crc kubenswrapper[4881]: I1211 08:26:52.004987 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr"
Dec 11 08:26:52 crc kubenswrapper[4881]: I1211 08:26:52.006047 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr"
Dec 11 08:26:52 crc kubenswrapper[4881]: I1211 08:26:52.395300 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-zdvsr"]
Dec 11 08:26:52 crc kubenswrapper[4881]: I1211 08:26:52.705581 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr" event={"ID":"578f637c-c2d8-46be-9838-f2a0b587b0c6","Type":"ContainerStarted","Data":"bfd98dbc960e5d255a8bd39282d721f91e489ba1dd2112bac0913f99591e0158"}
Dec 11 08:27:04 crc kubenswrapper[4881]: E1211 08:27:04.636066 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:9aec4c328ec43e40481e06ca5808deead74b75c0aacb90e9e72966c3fa14f385"
Dec 11 08:27:04 crc kubenswrapper[4881]: E1211 08:27:04.636753 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:perses-operator,Image:registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:9aec4c328ec43e40481e06ca5808deead74b75c0aacb90e9e72966c3fa14f385,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.0,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{134217728 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:openshift-service-ca,ReadOnly:true,MountPath:/ca,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8hqmn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000350000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod perses-operator-5446b9c989-clg8t_openshift-operators(8df2f7b3-931a-4e09-b473-f71d8ee210d8): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 11 08:27:04 crc kubenswrapper[4881]: E1211 08:27:04.637987 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"perses-operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/perses-operator-5446b9c989-clg8t" podUID="8df2f7b3-931a-4e09-b473-f71d8ee210d8"
Dec 11 08:27:04 crc kubenswrapper[4881]: E1211 08:27:04.674537 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:43d33f0125e6b990f4a972ac4e952a065d7e72dc1690c6c836963b7341734aec"
Dec 11 08:27:04 crc kubenswrapper[4881]: E1211 08:27:04.674832 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:prometheus-operator-admission-webhook,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:43d33f0125e6b990f4a972ac4e952a065d7e72dc1690c6c836963b7341734aec,Command:[],Args:[--web.enable-tls=true --web.cert-file=/tmp/k8s-webhook-server/serving-certs/tls.crt --web.key-file=/tmp/k8s-webhook-server/serving-certs/tls.key],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.0,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{209715200 0} {} BinarySI},},Requests:ResourceList{cpu: {{50 -3} {} 50m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:apiservice-cert,ReadOnly:false,MountPath:/apiserver.local.config/certificates,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_openshift-operators(6e8d1991-85d6-4f79-8233-98a8c9be9b32): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 11 08:27:04 crc kubenswrapper[4881]: E1211 08:27:04.676088 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator-admission-webhook\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" podUID="6e8d1991-85d6-4f79-8233-98a8c9be9b32"
Dec 11 08:27:04 crc
kubenswrapper[4881]: E1211 08:27:04.841073 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"perses-operator\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:9aec4c328ec43e40481e06ca5808deead74b75c0aacb90e9e72966c3fa14f385\\\"\"" pod="openshift-operators/perses-operator-5446b9c989-clg8t" podUID="8df2f7b3-931a-4e09-b473-f71d8ee210d8" Dec 11 08:27:04 crc kubenswrapper[4881]: E1211 08:27:04.841965 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator-admission-webhook\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:43d33f0125e6b990f4a972ac4e952a065d7e72dc1690c6c836963b7341734aec\\\"\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" podUID="6e8d1991-85d6-4f79-8233-98a8c9be9b32" Dec 11 08:27:07 crc kubenswrapper[4881]: I1211 08:27:07.864706 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr" event={"ID":"578f637c-c2d8-46be-9838-f2a0b587b0c6","Type":"ContainerStarted","Data":"91a3eba366f4afa644f685cb85d2e89f583793e217eca6972066a084974c037e"} Dec 11 08:27:07 crc kubenswrapper[4881]: I1211 08:27:07.865063 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr" Dec 11 08:27:07 crc kubenswrapper[4881]: I1211 08:27:07.866788 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89" event={"ID":"91763e1b-8187-4c07-be69-34a7330afb73","Type":"ContainerStarted","Data":"32685f2e0f45997504855d0056b5fcd10cafdc43201532d298a501e6234d3245"} Dec 11 08:27:07 crc kubenswrapper[4881]: I1211 08:27:07.868926 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf" event={"ID":"518448cb-2c51-4398-a75a-3d2c0d26905e","Type":"ContainerStarted","Data":"b3472f02ac4e07cd8696fa5118033461d8c0636a4a4986b3f7167ad80a4398a4"} Dec 11 08:27:07 crc kubenswrapper[4881]: I1211 08:27:07.887243 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr" podStartSLOduration=33.531629158 podStartE2EDuration="47.887215s" podCreationTimestamp="2025-12-11 08:26:20 +0000 UTC" firstStartedPulling="2025-12-11 08:26:52.407051036 +0000 UTC m=+660.784419733" lastFinishedPulling="2025-12-11 08:27:06.762636878 +0000 UTC m=+675.140005575" observedRunningTime="2025-12-11 08:27:07.882010969 +0000 UTC m=+676.259379676" watchObservedRunningTime="2025-12-11 08:27:07.887215 +0000 UTC m=+676.264583697" Dec 11 08:27:07 crc kubenswrapper[4881]: I1211 08:27:07.892360 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-zdvsr" Dec 11 08:27:07 crc kubenswrapper[4881]: I1211 08:27:07.901232 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-qwshf" podStartSLOduration=30.875555083 podStartE2EDuration="47.90121048s" podCreationTimestamp="2025-12-11 08:26:20 +0000 UTC" firstStartedPulling="2025-12-11 08:26:49.695447481 +0000 UTC m=+658.072816178" lastFinishedPulling="2025-12-11 08:27:06.721102878 +0000 UTC 
m=+675.098471575" observedRunningTime="2025-12-11 08:27:07.897306492 +0000 UTC m=+676.274675189" watchObservedRunningTime="2025-12-11 08:27:07.90121048 +0000 UTC m=+676.278579177" Dec 11 08:27:07 crc kubenswrapper[4881]: I1211 08:27:07.914809 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-n4z89" podStartSLOduration=32.079238337 podStartE2EDuration="47.91478365s" podCreationTimestamp="2025-12-11 08:26:20 +0000 UTC" firstStartedPulling="2025-12-11 08:26:50.885127574 +0000 UTC m=+659.262496271" lastFinishedPulling="2025-12-11 08:27:06.720672887 +0000 UTC m=+675.098041584" observedRunningTime="2025-12-11 08:27:07.911378325 +0000 UTC m=+676.288747032" watchObservedRunningTime="2025-12-11 08:27:07.91478365 +0000 UTC m=+676.292152357" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.046944 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-m6cfc"] Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.048311 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-m6cfc" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.053808 4881 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-5dzzg" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.054400 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-9wlsj"] Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.054567 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.055028 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.055366 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-9wlsj" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.057597 4881 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-4j9nw" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.067838 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-9wlsj"] Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.083074 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-k8mbh"] Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.084381 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-k8mbh" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.086225 4881 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-tv24h" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.100793 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-m6cfc"] Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.106793 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-k8mbh"] Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.148869 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7lh8\" (UniqueName: \"kubernetes.io/projected/e18d3098-b003-42c6-bba8-0fdeff9222d4-kube-api-access-c7lh8\") pod \"cert-manager-cainjector-7f985d654d-m6cfc\" (UID: \"e18d3098-b003-42c6-bba8-0fdeff9222d4\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-m6cfc" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.250506 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkgqk\" (UniqueName: \"kubernetes.io/projected/b5ad6193-6ecc-4146-ba6a-b16704219c0b-kube-api-access-fkgqk\") pod \"cert-manager-webhook-5655c58dd6-k8mbh\" (UID: \"b5ad6193-6ecc-4146-ba6a-b16704219c0b\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-k8mbh" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.250617 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6p72\" (UniqueName: \"kubernetes.io/projected/39798ebd-f640-4134-881a-1fc8aae8caf2-kube-api-access-v6p72\") pod \"cert-manager-5b446d88c5-9wlsj\" (UID: \"39798ebd-f640-4134-881a-1fc8aae8caf2\") " pod="cert-manager/cert-manager-5b446d88c5-9wlsj" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.250648 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7lh8\" (UniqueName: \"kubernetes.io/projected/e18d3098-b003-42c6-bba8-0fdeff9222d4-kube-api-access-c7lh8\") pod \"cert-manager-cainjector-7f985d654d-m6cfc\" (UID: \"e18d3098-b003-42c6-bba8-0fdeff9222d4\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-m6cfc" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.269393 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7lh8\" (UniqueName: \"kubernetes.io/projected/e18d3098-b003-42c6-bba8-0fdeff9222d4-kube-api-access-c7lh8\") pod \"cert-manager-cainjector-7f985d654d-m6cfc\" (UID: \"e18d3098-b003-42c6-bba8-0fdeff9222d4\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-m6cfc" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.352153 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkgqk\" (UniqueName: \"kubernetes.io/projected/b5ad6193-6ecc-4146-ba6a-b16704219c0b-kube-api-access-fkgqk\") pod \"cert-manager-webhook-5655c58dd6-k8mbh\" (UID: \"b5ad6193-6ecc-4146-ba6a-b16704219c0b\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-k8mbh" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.352245 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6p72\" (UniqueName: \"kubernetes.io/projected/39798ebd-f640-4134-881a-1fc8aae8caf2-kube-api-access-v6p72\") pod \"cert-manager-5b446d88c5-9wlsj\" (UID: 
\"39798ebd-f640-4134-881a-1fc8aae8caf2\") " pod="cert-manager/cert-manager-5b446d88c5-9wlsj" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.372714 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkgqk\" (UniqueName: \"kubernetes.io/projected/b5ad6193-6ecc-4146-ba6a-b16704219c0b-kube-api-access-fkgqk\") pod \"cert-manager-webhook-5655c58dd6-k8mbh\" (UID: \"b5ad6193-6ecc-4146-ba6a-b16704219c0b\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-k8mbh" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.376237 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-m6cfc" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.382119 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6p72\" (UniqueName: \"kubernetes.io/projected/39798ebd-f640-4134-881a-1fc8aae8caf2-kube-api-access-v6p72\") pod \"cert-manager-5b446d88c5-9wlsj\" (UID: \"39798ebd-f640-4134-881a-1fc8aae8caf2\") " pod="cert-manager/cert-manager-5b446d88c5-9wlsj" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.387775 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-9wlsj" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.399689 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-k8mbh" Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.790617 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-k8mbh"] Dec 11 08:27:16 crc kubenswrapper[4881]: W1211 08:27:16.881226 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode18d3098_b003_42c6_bba8_0fdeff9222d4.slice/crio-5773fcf4ee35c83b77473cd11874fbc44b4c7824304fc4dc2d9f3bf18107ab95 WatchSource:0}: Error finding container 5773fcf4ee35c83b77473cd11874fbc44b4c7824304fc4dc2d9f3bf18107ab95: Status 404 returned error can't find the container with id 5773fcf4ee35c83b77473cd11874fbc44b4c7824304fc4dc2d9f3bf18107ab95 Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.887264 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-m6cfc"] Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.892081 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-9wlsj"] Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.922810 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-m6cfc" event={"ID":"e18d3098-b003-42c6-bba8-0fdeff9222d4","Type":"ContainerStarted","Data":"5773fcf4ee35c83b77473cd11874fbc44b4c7824304fc4dc2d9f3bf18107ab95"} Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.923836 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-k8mbh" event={"ID":"b5ad6193-6ecc-4146-ba6a-b16704219c0b","Type":"ContainerStarted","Data":"429c3899c2765d48c1f088b536a9a75e9547c1009704c137bd9e869c9600b12c"} Dec 11 08:27:16 crc kubenswrapper[4881]: I1211 08:27:16.924750 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-9wlsj" event={"ID":"39798ebd-f640-4134-881a-1fc8aae8caf2","Type":"ContainerStarted","Data":"ac5bab671a5627c49d0643f26872049536eef7b43e95962db115e97f07e14385"} Dec 11 08:27:21 crc 
kubenswrapper[4881]: I1211 08:27:21.955921 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" event={"ID":"6e8d1991-85d6-4f79-8233-98a8c9be9b32","Type":"ContainerStarted","Data":"e2f86e2f691372e4a2c7066a820b4e16982736077502331b10149f3faeea6967"} Dec 11 08:27:21 crc kubenswrapper[4881]: I1211 08:27:21.974561 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7" podStartSLOduration=-9223371974.88024 podStartE2EDuration="1m1.974536944s" podCreationTimestamp="2025-12-11 08:26:20 +0000 UTC" firstStartedPulling="2025-12-11 08:26:49.717041453 +0000 UTC m=+658.094410150" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:27:21.97319143 +0000 UTC m=+690.350560137" watchObservedRunningTime="2025-12-11 08:27:21.974536944 +0000 UTC m=+690.351905641" Dec 11 08:27:23 crc kubenswrapper[4881]: I1211 08:27:23.973110 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-clg8t" event={"ID":"8df2f7b3-931a-4e09-b473-f71d8ee210d8","Type":"ContainerStarted","Data":"efb096100ec30d061a4422e9568ffc3dfbf7e4c06556bb2cea03a77e23496c70"} Dec 11 08:27:23 crc kubenswrapper[4881]: I1211 08:27:23.974045 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-clg8t" Dec 11 08:27:23 crc kubenswrapper[4881]: I1211 08:27:23.996114 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-clg8t" podStartSLOduration=29.629014668 podStartE2EDuration="1m3.996092868s" podCreationTimestamp="2025-12-11 08:26:20 +0000 UTC" firstStartedPulling="2025-12-11 08:26:49.35596234 +0000 UTC m=+657.733331047" lastFinishedPulling="2025-12-11 08:27:23.72304055 +0000 UTC m=+692.100409247" observedRunningTime="2025-12-11 08:27:23.995961774 +0000 UTC m=+692.373330471" watchObservedRunningTime="2025-12-11 08:27:23.996092868 +0000 UTC m=+692.373461565" Dec 11 08:27:31 crc kubenswrapper[4881]: I1211 08:27:31.199988 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-clg8t" Dec 11 08:27:32 crc kubenswrapper[4881]: I1211 08:27:32.027422 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-k8mbh" event={"ID":"b5ad6193-6ecc-4146-ba6a-b16704219c0b","Type":"ContainerStarted","Data":"eb82629f2ab8805674482d2e0d41027159c00d1051db7678655421c39fc7f79f"} Dec 11 08:27:32 crc kubenswrapper[4881]: I1211 08:27:32.027738 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-k8mbh" Dec 11 08:27:32 crc kubenswrapper[4881]: I1211 08:27:32.029160 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-m6cfc" event={"ID":"e18d3098-b003-42c6-bba8-0fdeff9222d4","Type":"ContainerStarted","Data":"0d816934119253c00a1339d5a26a5d655d57310adf8116e438ff1da3245f0399"} Dec 11 08:27:32 crc kubenswrapper[4881]: I1211 08:27:32.049882 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-k8mbh" podStartSLOduration=1.576046357 podStartE2EDuration="16.0498635s" podCreationTimestamp="2025-12-11 08:27:16 +0000 UTC" firstStartedPulling="2025-12-11 08:27:16.801897941 +0000 UTC 
m=+685.179266638" lastFinishedPulling="2025-12-11 08:27:31.275715084 +0000 UTC m=+699.653083781" observedRunningTime="2025-12-11 08:27:32.042121957 +0000 UTC m=+700.419490654" watchObservedRunningTime="2025-12-11 08:27:32.0498635 +0000 UTC m=+700.427232197" Dec 11 08:27:32 crc kubenswrapper[4881]: I1211 08:27:32.063426 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-m6cfc" podStartSLOduration=1.754054495 podStartE2EDuration="16.06340998s" podCreationTimestamp="2025-12-11 08:27:16 +0000 UTC" firstStartedPulling="2025-12-11 08:27:16.883094784 +0000 UTC m=+685.260463481" lastFinishedPulling="2025-12-11 08:27:31.192450269 +0000 UTC m=+699.569818966" observedRunningTime="2025-12-11 08:27:32.061392028 +0000 UTC m=+700.438760725" watchObservedRunningTime="2025-12-11 08:27:32.06340998 +0000 UTC m=+700.440778677" Dec 11 08:27:33 crc kubenswrapper[4881]: I1211 08:27:33.036452 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-9wlsj" event={"ID":"39798ebd-f640-4134-881a-1fc8aae8caf2","Type":"ContainerStarted","Data":"4e214998eb8b431c8416a8b87f49a01e9e99367bad4e4b21c3f1e0812046c620"} Dec 11 08:27:33 crc kubenswrapper[4881]: I1211 08:27:33.055377 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-9wlsj" podStartSLOduration=1.538929017 podStartE2EDuration="17.055355299s" podCreationTimestamp="2025-12-11 08:27:16 +0000 UTC" firstStartedPulling="2025-12-11 08:27:16.893344531 +0000 UTC m=+685.270713228" lastFinishedPulling="2025-12-11 08:27:32.409770803 +0000 UTC m=+700.787139510" observedRunningTime="2025-12-11 08:27:33.049986666 +0000 UTC m=+701.427355363" watchObservedRunningTime="2025-12-11 08:27:33.055355299 +0000 UTC m=+701.432724226" Dec 11 08:27:36 crc kubenswrapper[4881]: I1211 08:27:36.402036 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-k8mbh" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.171256 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng"] Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.173033 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.176175 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.182848 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng"] Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.269179 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b1985fae-8a44-4865-b0bc-7d9e8d197a02-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng\" (UID: \"b1985fae-8a44-4865-b0bc-7d9e8d197a02\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.269382 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b1985fae-8a44-4865-b0bc-7d9e8d197a02-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng\" (UID: \"b1985fae-8a44-4865-b0bc-7d9e8d197a02\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.269436 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdfwb\" (UniqueName: \"kubernetes.io/projected/b1985fae-8a44-4865-b0bc-7d9e8d197a02-kube-api-access-pdfwb\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng\" (UID: \"b1985fae-8a44-4865-b0bc-7d9e8d197a02\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.370277 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b1985fae-8a44-4865-b0bc-7d9e8d197a02-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng\" (UID: \"b1985fae-8a44-4865-b0bc-7d9e8d197a02\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.370366 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdfwb\" (UniqueName: \"kubernetes.io/projected/b1985fae-8a44-4865-b0bc-7d9e8d197a02-kube-api-access-pdfwb\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng\" (UID: \"b1985fae-8a44-4865-b0bc-7d9e8d197a02\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.370411 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b1985fae-8a44-4865-b0bc-7d9e8d197a02-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng\" (UID: \"b1985fae-8a44-4865-b0bc-7d9e8d197a02\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.371259 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/b1985fae-8a44-4865-b0bc-7d9e8d197a02-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng\" (UID: \"b1985fae-8a44-4865-b0bc-7d9e8d197a02\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.371774 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b1985fae-8a44-4865-b0bc-7d9e8d197a02-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng\" (UID: \"b1985fae-8a44-4865-b0bc-7d9e8d197a02\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.379975 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr"] Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.383819 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.388006 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr"] Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.412821 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdfwb\" (UniqueName: \"kubernetes.io/projected/b1985fae-8a44-4865-b0bc-7d9e8d197a02-kube-api-access-pdfwb\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng\" (UID: \"b1985fae-8a44-4865-b0bc-7d9e8d197a02\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.471157 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e703c073-e40b-4293-9138-648e3d24c648-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr\" (UID: \"e703c073-e40b-4293-9138-648e3d24c648\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.471557 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4fh2\" (UniqueName: \"kubernetes.io/projected/e703c073-e40b-4293-9138-648e3d24c648-kube-api-access-d4fh2\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr\" (UID: \"e703c073-e40b-4293-9138-648e3d24c648\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.471711 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e703c073-e40b-4293-9138-648e3d24c648-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr\" (UID: \"e703c073-e40b-4293-9138-648e3d24c648\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.500424 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.572592 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e703c073-e40b-4293-9138-648e3d24c648-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr\" (UID: \"e703c073-e40b-4293-9138-648e3d24c648\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.573040 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e703c073-e40b-4293-9138-648e3d24c648-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr\" (UID: \"e703c073-e40b-4293-9138-648e3d24c648\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.573564 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e703c073-e40b-4293-9138-648e3d24c648-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr\" (UID: \"e703c073-e40b-4293-9138-648e3d24c648\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.573770 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4fh2\" (UniqueName: \"kubernetes.io/projected/e703c073-e40b-4293-9138-648e3d24c648-kube-api-access-d4fh2\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr\" (UID: \"e703c073-e40b-4293-9138-648e3d24c648\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.574630 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e703c073-e40b-4293-9138-648e3d24c648-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr\" (UID: \"e703c073-e40b-4293-9138-648e3d24c648\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.606351 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4fh2\" (UniqueName: \"kubernetes.io/projected/e703c073-e40b-4293-9138-648e3d24c648-kube-api-access-d4fh2\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr\" (UID: \"e703c073-e40b-4293-9138-648e3d24c648\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.729822 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" Dec 11 08:27:59 crc kubenswrapper[4881]: I1211 08:27:59.972200 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng"] Dec 11 08:28:00 crc kubenswrapper[4881]: I1211 08:28:00.034469 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr"] Dec 11 08:28:00 crc kubenswrapper[4881]: W1211 08:28:00.044690 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode703c073_e40b_4293_9138_648e3d24c648.slice/crio-f79a050676e63b29093d75849a4fb9d2a4fe84951bbdb5bb114b3e567ac69631 WatchSource:0}: Error finding container f79a050676e63b29093d75849a4fb9d2a4fe84951bbdb5bb114b3e567ac69631: Status 404 returned error can't find the container with id f79a050676e63b29093d75849a4fb9d2a4fe84951bbdb5bb114b3e567ac69631 Dec 11 08:28:00 crc kubenswrapper[4881]: I1211 08:28:00.223042 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" event={"ID":"b1985fae-8a44-4865-b0bc-7d9e8d197a02","Type":"ContainerStarted","Data":"2e5ca11653b469a2c9295fb80db1f7b54f67906c1b78d971861639278f89bfb1"} Dec 11 08:28:00 crc kubenswrapper[4881]: I1211 08:28:00.224495 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" event={"ID":"e703c073-e40b-4293-9138-648e3d24c648","Type":"ContainerStarted","Data":"f79a050676e63b29093d75849a4fb9d2a4fe84951bbdb5bb114b3e567ac69631"} Dec 11 08:28:01 crc kubenswrapper[4881]: I1211 08:28:01.241098 4881 generic.go:334] "Generic (PLEG): container finished" podID="b1985fae-8a44-4865-b0bc-7d9e8d197a02" containerID="0797f246151d6e979090331439d8b042402f3793e5d00228e2232a9028eebab4" exitCode=0 Dec 11 08:28:01 crc kubenswrapper[4881]: I1211 08:28:01.241449 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" event={"ID":"b1985fae-8a44-4865-b0bc-7d9e8d197a02","Type":"ContainerDied","Data":"0797f246151d6e979090331439d8b042402f3793e5d00228e2232a9028eebab4"} Dec 11 08:28:01 crc kubenswrapper[4881]: I1211 08:28:01.246696 4881 generic.go:334] "Generic (PLEG): container finished" podID="e703c073-e40b-4293-9138-648e3d24c648" containerID="e675cfe00d0ebc0b528d2b9af352cb7ea51a7e4b57ef1b102e2dd74f3be1279e" exitCode=0 Dec 11 08:28:01 crc kubenswrapper[4881]: I1211 08:28:01.246818 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" event={"ID":"e703c073-e40b-4293-9138-648e3d24c648","Type":"ContainerDied","Data":"e675cfe00d0ebc0b528d2b9af352cb7ea51a7e4b57ef1b102e2dd74f3be1279e"} Dec 11 08:28:04 crc kubenswrapper[4881]: I1211 08:28:04.272030 4881 generic.go:334] "Generic (PLEG): container finished" podID="e703c073-e40b-4293-9138-648e3d24c648" containerID="760a2f0dc437de05c2640da04ec41d8d936c0613f8cdd62032fc409eb4423550" exitCode=0 Dec 11 08:28:04 crc kubenswrapper[4881]: I1211 08:28:04.272162 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" 
event={"ID":"e703c073-e40b-4293-9138-648e3d24c648","Type":"ContainerDied","Data":"760a2f0dc437de05c2640da04ec41d8d936c0613f8cdd62032fc409eb4423550"} Dec 11 08:28:04 crc kubenswrapper[4881]: I1211 08:28:04.274898 4881 generic.go:334] "Generic (PLEG): container finished" podID="b1985fae-8a44-4865-b0bc-7d9e8d197a02" containerID="a7c4194c8d4cac1e58d930b5005af04cb49eeabe96f796c5c3d110e7e09c93d6" exitCode=0 Dec 11 08:28:04 crc kubenswrapper[4881]: I1211 08:28:04.274941 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" event={"ID":"b1985fae-8a44-4865-b0bc-7d9e8d197a02","Type":"ContainerDied","Data":"a7c4194c8d4cac1e58d930b5005af04cb49eeabe96f796c5c3d110e7e09c93d6"} Dec 11 08:28:05 crc kubenswrapper[4881]: I1211 08:28:05.284308 4881 generic.go:334] "Generic (PLEG): container finished" podID="b1985fae-8a44-4865-b0bc-7d9e8d197a02" containerID="967316903ee0823e35a04b48aee1db059e08a7e32cce3a79bcb6f3f8f06f7957" exitCode=0 Dec 11 08:28:05 crc kubenswrapper[4881]: I1211 08:28:05.284393 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" event={"ID":"b1985fae-8a44-4865-b0bc-7d9e8d197a02","Type":"ContainerDied","Data":"967316903ee0823e35a04b48aee1db059e08a7e32cce3a79bcb6f3f8f06f7957"} Dec 11 08:28:05 crc kubenswrapper[4881]: I1211 08:28:05.287295 4881 generic.go:334] "Generic (PLEG): container finished" podID="e703c073-e40b-4293-9138-648e3d24c648" containerID="3ca4bc0018222334b4247639a21f8e99a7f7afccc432a34d7ec5ae4ae65d0a57" exitCode=0 Dec 11 08:28:05 crc kubenswrapper[4881]: I1211 08:28:05.287367 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" event={"ID":"e703c073-e40b-4293-9138-648e3d24c648","Type":"ContainerDied","Data":"3ca4bc0018222334b4247639a21f8e99a7f7afccc432a34d7ec5ae4ae65d0a57"} Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.577980 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.583443 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.686877 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b1985fae-8a44-4865-b0bc-7d9e8d197a02-bundle\") pod \"b1985fae-8a44-4865-b0bc-7d9e8d197a02\" (UID: \"b1985fae-8a44-4865-b0bc-7d9e8d197a02\") " Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.686983 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e703c073-e40b-4293-9138-648e3d24c648-util\") pod \"e703c073-e40b-4293-9138-648e3d24c648\" (UID: \"e703c073-e40b-4293-9138-648e3d24c648\") " Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.687016 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b1985fae-8a44-4865-b0bc-7d9e8d197a02-util\") pod \"b1985fae-8a44-4865-b0bc-7d9e8d197a02\" (UID: \"b1985fae-8a44-4865-b0bc-7d9e8d197a02\") " Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.687053 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e703c073-e40b-4293-9138-648e3d24c648-bundle\") pod \"e703c073-e40b-4293-9138-648e3d24c648\" (UID: \"e703c073-e40b-4293-9138-648e3d24c648\") " Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.687108 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pdfwb\" (UniqueName: \"kubernetes.io/projected/b1985fae-8a44-4865-b0bc-7d9e8d197a02-kube-api-access-pdfwb\") pod \"b1985fae-8a44-4865-b0bc-7d9e8d197a02\" (UID: \"b1985fae-8a44-4865-b0bc-7d9e8d197a02\") " Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.687183 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4fh2\" (UniqueName: \"kubernetes.io/projected/e703c073-e40b-4293-9138-648e3d24c648-kube-api-access-d4fh2\") pod \"e703c073-e40b-4293-9138-648e3d24c648\" (UID: \"e703c073-e40b-4293-9138-648e3d24c648\") " Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.688235 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e703c073-e40b-4293-9138-648e3d24c648-bundle" (OuterVolumeSpecName: "bundle") pod "e703c073-e40b-4293-9138-648e3d24c648" (UID: "e703c073-e40b-4293-9138-648e3d24c648"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.688291 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1985fae-8a44-4865-b0bc-7d9e8d197a02-bundle" (OuterVolumeSpecName: "bundle") pod "b1985fae-8a44-4865-b0bc-7d9e8d197a02" (UID: "b1985fae-8a44-4865-b0bc-7d9e8d197a02"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.694604 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e703c073-e40b-4293-9138-648e3d24c648-kube-api-access-d4fh2" (OuterVolumeSpecName: "kube-api-access-d4fh2") pod "e703c073-e40b-4293-9138-648e3d24c648" (UID: "e703c073-e40b-4293-9138-648e3d24c648"). InnerVolumeSpecName "kube-api-access-d4fh2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.694654 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1985fae-8a44-4865-b0bc-7d9e8d197a02-kube-api-access-pdfwb" (OuterVolumeSpecName: "kube-api-access-pdfwb") pod "b1985fae-8a44-4865-b0bc-7d9e8d197a02" (UID: "b1985fae-8a44-4865-b0bc-7d9e8d197a02"). InnerVolumeSpecName "kube-api-access-pdfwb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.698282 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1985fae-8a44-4865-b0bc-7d9e8d197a02-util" (OuterVolumeSpecName: "util") pod "b1985fae-8a44-4865-b0bc-7d9e8d197a02" (UID: "b1985fae-8a44-4865-b0bc-7d9e8d197a02"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.702646 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e703c073-e40b-4293-9138-648e3d24c648-util" (OuterVolumeSpecName: "util") pod "e703c073-e40b-4293-9138-648e3d24c648" (UID: "e703c073-e40b-4293-9138-648e3d24c648"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.789216 4881 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e703c073-e40b-4293-9138-648e3d24c648-util\") on node \"crc\" DevicePath \"\"" Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.789256 4881 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b1985fae-8a44-4865-b0bc-7d9e8d197a02-util\") on node \"crc\" DevicePath \"\"" Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.789268 4881 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e703c073-e40b-4293-9138-648e3d24c648-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.789282 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pdfwb\" (UniqueName: \"kubernetes.io/projected/b1985fae-8a44-4865-b0bc-7d9e8d197a02-kube-api-access-pdfwb\") on node \"crc\" DevicePath \"\"" Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.789297 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4fh2\" (UniqueName: \"kubernetes.io/projected/e703c073-e40b-4293-9138-648e3d24c648-kube-api-access-d4fh2\") on node \"crc\" DevicePath \"\"" Dec 11 08:28:06 crc kubenswrapper[4881]: I1211 08:28:06.789308 4881 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b1985fae-8a44-4865-b0bc-7d9e8d197a02-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:28:07 crc kubenswrapper[4881]: I1211 08:28:07.304732 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" event={"ID":"b1985fae-8a44-4865-b0bc-7d9e8d197a02","Type":"ContainerDied","Data":"2e5ca11653b469a2c9295fb80db1f7b54f67906c1b78d971861639278f89bfb1"} Dec 11 08:28:07 crc kubenswrapper[4881]: I1211 08:28:07.304790 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e5ca11653b469a2c9295fb80db1f7b54f67906c1b78d971861639278f89bfb1" Dec 11 08:28:07 crc kubenswrapper[4881]: I1211 08:28:07.304761 4881 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng" Dec 11 08:28:07 crc kubenswrapper[4881]: I1211 08:28:07.306363 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" event={"ID":"e703c073-e40b-4293-9138-648e3d24c648","Type":"ContainerDied","Data":"f79a050676e63b29093d75849a4fb9d2a4fe84951bbdb5bb114b3e567ac69631"} Dec 11 08:28:07 crc kubenswrapper[4881]: I1211 08:28:07.306386 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f79a050676e63b29093d75849a4fb9d2a4fe84951bbdb5bb114b3e567ac69631" Dec 11 08:28:07 crc kubenswrapper[4881]: I1211 08:28:07.306428 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.054846 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx"] Dec 11 08:28:15 crc kubenswrapper[4881]: E1211 08:28:15.055749 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e703c073-e40b-4293-9138-648e3d24c648" containerName="pull" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.055861 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e703c073-e40b-4293-9138-648e3d24c648" containerName="pull" Dec 11 08:28:15 crc kubenswrapper[4881]: E1211 08:28:15.055874 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1985fae-8a44-4865-b0bc-7d9e8d197a02" containerName="pull" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.055882 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1985fae-8a44-4865-b0bc-7d9e8d197a02" containerName="pull" Dec 11 08:28:15 crc kubenswrapper[4881]: E1211 08:28:15.055903 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1985fae-8a44-4865-b0bc-7d9e8d197a02" containerName="util" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.055910 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1985fae-8a44-4865-b0bc-7d9e8d197a02" containerName="util" Dec 11 08:28:15 crc kubenswrapper[4881]: E1211 08:28:15.055921 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e703c073-e40b-4293-9138-648e3d24c648" containerName="extract" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.055929 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e703c073-e40b-4293-9138-648e3d24c648" containerName="extract" Dec 11 08:28:15 crc kubenswrapper[4881]: E1211 08:28:15.055942 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1985fae-8a44-4865-b0bc-7d9e8d197a02" containerName="extract" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.055949 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1985fae-8a44-4865-b0bc-7d9e8d197a02" containerName="extract" Dec 11 08:28:15 crc kubenswrapper[4881]: E1211 08:28:15.055960 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e703c073-e40b-4293-9138-648e3d24c648" containerName="util" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.055967 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e703c073-e40b-4293-9138-648e3d24c648" containerName="util" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.056136 4881 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="e703c073-e40b-4293-9138-648e3d24c648" containerName="extract" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.056161 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1985fae-8a44-4865-b0bc-7d9e8d197a02" containerName="extract" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.056957 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.059479 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.059518 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.060213 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.060270 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-hzbmm" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.060374 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.060830 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.071102 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx"] Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.116022 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/71e1a3d0-ed67-45c6-8bfd-95237910c5c9-apiservice-cert\") pod \"loki-operator-controller-manager-5895ddbf9f-qxpgx\" (UID: \"71e1a3d0-ed67-45c6-8bfd-95237910c5c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.116198 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/71e1a3d0-ed67-45c6-8bfd-95237910c5c9-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-5895ddbf9f-qxpgx\" (UID: \"71e1a3d0-ed67-45c6-8bfd-95237910c5c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.116362 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/71e1a3d0-ed67-45c6-8bfd-95237910c5c9-manager-config\") pod \"loki-operator-controller-manager-5895ddbf9f-qxpgx\" (UID: \"71e1a3d0-ed67-45c6-8bfd-95237910c5c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.116400 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/71e1a3d0-ed67-45c6-8bfd-95237910c5c9-webhook-cert\") pod \"loki-operator-controller-manager-5895ddbf9f-qxpgx\" (UID: \"71e1a3d0-ed67-45c6-8bfd-95237910c5c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.116489 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cvss\" (UniqueName: \"kubernetes.io/projected/71e1a3d0-ed67-45c6-8bfd-95237910c5c9-kube-api-access-9cvss\") pod \"loki-operator-controller-manager-5895ddbf9f-qxpgx\" (UID: \"71e1a3d0-ed67-45c6-8bfd-95237910c5c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.217142 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/71e1a3d0-ed67-45c6-8bfd-95237910c5c9-manager-config\") pod \"loki-operator-controller-manager-5895ddbf9f-qxpgx\" (UID: \"71e1a3d0-ed67-45c6-8bfd-95237910c5c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.217199 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/71e1a3d0-ed67-45c6-8bfd-95237910c5c9-webhook-cert\") pod \"loki-operator-controller-manager-5895ddbf9f-qxpgx\" (UID: \"71e1a3d0-ed67-45c6-8bfd-95237910c5c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.217284 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cvss\" (UniqueName: \"kubernetes.io/projected/71e1a3d0-ed67-45c6-8bfd-95237910c5c9-kube-api-access-9cvss\") pod \"loki-operator-controller-manager-5895ddbf9f-qxpgx\" (UID: \"71e1a3d0-ed67-45c6-8bfd-95237910c5c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.217319 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/71e1a3d0-ed67-45c6-8bfd-95237910c5c9-apiservice-cert\") pod \"loki-operator-controller-manager-5895ddbf9f-qxpgx\" (UID: \"71e1a3d0-ed67-45c6-8bfd-95237910c5c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.217374 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/71e1a3d0-ed67-45c6-8bfd-95237910c5c9-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-5895ddbf9f-qxpgx\" (UID: \"71e1a3d0-ed67-45c6-8bfd-95237910c5c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.219299 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/71e1a3d0-ed67-45c6-8bfd-95237910c5c9-manager-config\") pod \"loki-operator-controller-manager-5895ddbf9f-qxpgx\" (UID: \"71e1a3d0-ed67-45c6-8bfd-95237910c5c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.222664 4881 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/71e1a3d0-ed67-45c6-8bfd-95237910c5c9-apiservice-cert\") pod \"loki-operator-controller-manager-5895ddbf9f-qxpgx\" (UID: \"71e1a3d0-ed67-45c6-8bfd-95237910c5c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.224921 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/71e1a3d0-ed67-45c6-8bfd-95237910c5c9-webhook-cert\") pod \"loki-operator-controller-manager-5895ddbf9f-qxpgx\" (UID: \"71e1a3d0-ed67-45c6-8bfd-95237910c5c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.227008 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/71e1a3d0-ed67-45c6-8bfd-95237910c5c9-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-5895ddbf9f-qxpgx\" (UID: \"71e1a3d0-ed67-45c6-8bfd-95237910c5c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.234087 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cvss\" (UniqueName: \"kubernetes.io/projected/71e1a3d0-ed67-45c6-8bfd-95237910c5c9-kube-api-access-9cvss\") pod \"loki-operator-controller-manager-5895ddbf9f-qxpgx\" (UID: \"71e1a3d0-ed67-45c6-8bfd-95237910c5c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.374379 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:15 crc kubenswrapper[4881]: I1211 08:28:15.822731 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx"] Dec 11 08:28:16 crc kubenswrapper[4881]: I1211 08:28:16.385318 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" event={"ID":"71e1a3d0-ed67-45c6-8bfd-95237910c5c9","Type":"ContainerStarted","Data":"667547a398e018a4d513cbdb5df2fff6956443ba801ff8e2f9e5193d43646761"} Dec 11 08:28:19 crc kubenswrapper[4881]: I1211 08:28:19.411672 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-w8lh7"] Dec 11 08:28:19 crc kubenswrapper[4881]: I1211 08:28:19.413050 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-w8lh7" Dec 11 08:28:19 crc kubenswrapper[4881]: I1211 08:28:19.414563 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"cluster-logging-operator-dockercfg-ttthp" Dec 11 08:28:19 crc kubenswrapper[4881]: I1211 08:28:19.414912 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"openshift-service-ca.crt" Dec 11 08:28:19 crc kubenswrapper[4881]: I1211 08:28:19.417078 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"kube-root-ca.crt" Dec 11 08:28:19 crc kubenswrapper[4881]: I1211 08:28:19.428602 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-w8lh7"] Dec 11 08:28:19 crc kubenswrapper[4881]: I1211 08:28:19.577730 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2l92\" (UniqueName: \"kubernetes.io/projected/e9a4de57-461a-4db3-b12e-5d9eb9fd0a60-kube-api-access-g2l92\") pod \"cluster-logging-operator-ff9846bd-w8lh7\" (UID: \"e9a4de57-461a-4db3-b12e-5d9eb9fd0a60\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-w8lh7" Dec 11 08:28:19 crc kubenswrapper[4881]: I1211 08:28:19.679511 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2l92\" (UniqueName: \"kubernetes.io/projected/e9a4de57-461a-4db3-b12e-5d9eb9fd0a60-kube-api-access-g2l92\") pod \"cluster-logging-operator-ff9846bd-w8lh7\" (UID: \"e9a4de57-461a-4db3-b12e-5d9eb9fd0a60\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-w8lh7" Dec 11 08:28:19 crc kubenswrapper[4881]: I1211 08:28:19.709375 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2l92\" (UniqueName: \"kubernetes.io/projected/e9a4de57-461a-4db3-b12e-5d9eb9fd0a60-kube-api-access-g2l92\") pod \"cluster-logging-operator-ff9846bd-w8lh7\" (UID: \"e9a4de57-461a-4db3-b12e-5d9eb9fd0a60\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-w8lh7" Dec 11 08:28:19 crc kubenswrapper[4881]: I1211 08:28:19.734967 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-w8lh7" Dec 11 08:28:21 crc kubenswrapper[4881]: I1211 08:28:21.205983 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-w8lh7"] Dec 11 08:28:21 crc kubenswrapper[4881]: W1211 08:28:21.215678 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode9a4de57_461a_4db3_b12e_5d9eb9fd0a60.slice/crio-adb7e5853c00d49f2262ed384086d839f2ce7a2631eadb409952ee2f55f74348 WatchSource:0}: Error finding container adb7e5853c00d49f2262ed384086d839f2ce7a2631eadb409952ee2f55f74348: Status 404 returned error can't find the container with id adb7e5853c00d49f2262ed384086d839f2ce7a2631eadb409952ee2f55f74348 Dec 11 08:28:21 crc kubenswrapper[4881]: I1211 08:28:21.425618 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-w8lh7" event={"ID":"e9a4de57-461a-4db3-b12e-5d9eb9fd0a60","Type":"ContainerStarted","Data":"adb7e5853c00d49f2262ed384086d839f2ce7a2631eadb409952ee2f55f74348"} Dec 11 08:28:21 crc kubenswrapper[4881]: I1211 08:28:21.427914 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" event={"ID":"71e1a3d0-ed67-45c6-8bfd-95237910c5c9","Type":"ContainerStarted","Data":"16f5c12396cfc0d2ca670f51ead3a9533c3bbbf9380a914e30c0c379d0351b1b"} Dec 11 08:28:30 crc kubenswrapper[4881]: I1211 08:28:30.495472 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" event={"ID":"71e1a3d0-ed67-45c6-8bfd-95237910c5c9","Type":"ContainerStarted","Data":"0ce36dd54559e3e944ef58f45b07cbda642f0c6c2a4663642c1ab3c8d4c1bede"} Dec 11 08:28:30 crc kubenswrapper[4881]: I1211 08:28:30.504595 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:30 crc kubenswrapper[4881]: I1211 08:28:30.504636 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-w8lh7" event={"ID":"e9a4de57-461a-4db3-b12e-5d9eb9fd0a60","Type":"ContainerStarted","Data":"20f5d66e6f79da4527f476e3a04ffa97ba009274c152bd69472c8a67d3d35a76"} Dec 11 08:28:30 crc kubenswrapper[4881]: I1211 08:28:30.504708 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" Dec 11 08:28:30 crc kubenswrapper[4881]: I1211 08:28:30.532926 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators-redhat/loki-operator-controller-manager-5895ddbf9f-qxpgx" podStartSLOduration=2.074570936 podStartE2EDuration="15.532906537s" podCreationTimestamp="2025-12-11 08:28:15 +0000 UTC" firstStartedPulling="2025-12-11 08:28:15.830600028 +0000 UTC m=+744.207968735" lastFinishedPulling="2025-12-11 08:28:29.288935639 +0000 UTC m=+757.666304336" observedRunningTime="2025-12-11 08:28:30.529562201 +0000 UTC m=+758.906930908" watchObservedRunningTime="2025-12-11 08:28:30.532906537 +0000 UTC m=+758.910275234" Dec 11 08:28:30 crc kubenswrapper[4881]: I1211 08:28:30.580024 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/cluster-logging-operator-ff9846bd-w8lh7" podStartSLOduration=3.527270241 podStartE2EDuration="11.579999187s" 
podCreationTimestamp="2025-12-11 08:28:19 +0000 UTC" firstStartedPulling="2025-12-11 08:28:21.219022605 +0000 UTC m=+749.596391302" lastFinishedPulling="2025-12-11 08:28:29.271751511 +0000 UTC m=+757.649120248" observedRunningTime="2025-12-11 08:28:30.57737661 +0000 UTC m=+758.954745327" watchObservedRunningTime="2025-12-11 08:28:30.579999187 +0000 UTC m=+758.957367884" Dec 11 08:28:36 crc kubenswrapper[4881]: I1211 08:28:36.062760 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["minio-dev/minio"] Dec 11 08:28:36 crc kubenswrapper[4881]: I1211 08:28:36.064043 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio" Dec 11 08:28:36 crc kubenswrapper[4881]: I1211 08:28:36.066633 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt" Dec 11 08:28:36 crc kubenswrapper[4881]: I1211 08:28:36.066696 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt" Dec 11 08:28:36 crc kubenswrapper[4881]: I1211 08:28:36.074884 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Dec 11 08:28:36 crc kubenswrapper[4881]: I1211 08:28:36.219550 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a36333db-9377-4b48-95c0-58167a432163\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a36333db-9377-4b48-95c0-58167a432163\") pod \"minio\" (UID: \"6a6a1868-0ae9-4644-8b1a-95ba2ec58040\") " pod="minio-dev/minio" Dec 11 08:28:36 crc kubenswrapper[4881]: I1211 08:28:36.220020 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n56c6\" (UniqueName: \"kubernetes.io/projected/6a6a1868-0ae9-4644-8b1a-95ba2ec58040-kube-api-access-n56c6\") pod \"minio\" (UID: \"6a6a1868-0ae9-4644-8b1a-95ba2ec58040\") " pod="minio-dev/minio" Dec 11 08:28:36 crc kubenswrapper[4881]: I1211 08:28:36.320909 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a36333db-9377-4b48-95c0-58167a432163\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a36333db-9377-4b48-95c0-58167a432163\") pod \"minio\" (UID: \"6a6a1868-0ae9-4644-8b1a-95ba2ec58040\") " pod="minio-dev/minio" Dec 11 08:28:36 crc kubenswrapper[4881]: I1211 08:28:36.320982 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n56c6\" (UniqueName: \"kubernetes.io/projected/6a6a1868-0ae9-4644-8b1a-95ba2ec58040-kube-api-access-n56c6\") pod \"minio\" (UID: \"6a6a1868-0ae9-4644-8b1a-95ba2ec58040\") " pod="minio-dev/minio" Dec 11 08:28:36 crc kubenswrapper[4881]: I1211 08:28:36.323947 4881 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 11 08:28:36 crc kubenswrapper[4881]: I1211 08:28:36.323985 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a36333db-9377-4b48-95c0-58167a432163\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a36333db-9377-4b48-95c0-58167a432163\") pod \"minio\" (UID: \"6a6a1868-0ae9-4644-8b1a-95ba2ec58040\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0afa9572be9b6431048eb48d828f7259ea664deb2b8fef436e2c47da3225a85c/globalmount\"" pod="minio-dev/minio"
Dec 11 08:28:36 crc kubenswrapper[4881]: I1211 08:28:36.342870 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n56c6\" (UniqueName: \"kubernetes.io/projected/6a6a1868-0ae9-4644-8b1a-95ba2ec58040-kube-api-access-n56c6\") pod \"minio\" (UID: \"6a6a1868-0ae9-4644-8b1a-95ba2ec58040\") " pod="minio-dev/minio"
Dec 11 08:28:36 crc kubenswrapper[4881]: I1211 08:28:36.349436 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a36333db-9377-4b48-95c0-58167a432163\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a36333db-9377-4b48-95c0-58167a432163\") pod \"minio\" (UID: \"6a6a1868-0ae9-4644-8b1a-95ba2ec58040\") " pod="minio-dev/minio"
Dec 11 08:28:36 crc kubenswrapper[4881]: I1211 08:28:36.380298 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio"
Dec 11 08:28:36 crc kubenswrapper[4881]: I1211 08:28:36.817212 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"]
Dec 11 08:28:37 crc kubenswrapper[4881]: I1211 08:28:37.541513 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"6a6a1868-0ae9-4644-8b1a-95ba2ec58040","Type":"ContainerStarted","Data":"5f69c1da04bed42aed5a75e7329fa0d76c394abc927535c78a8dd0c51e3fc7f0"}
Dec 11 08:28:43 crc kubenswrapper[4881]: I1211 08:28:43.383065 4881 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Dec 11 08:28:45 crc kubenswrapper[4881]: I1211 08:28:45.596653 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"6a6a1868-0ae9-4644-8b1a-95ba2ec58040","Type":"ContainerStarted","Data":"29d4ff454cc6d765181993acfef50b1e7db819fea2ff815bbbaa23f9e31f3035"}
Dec 11 08:28:45 crc kubenswrapper[4881]: I1211 08:28:45.622138 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="minio-dev/minio" podStartSLOduration=4.495417527 podStartE2EDuration="12.622109189s" podCreationTimestamp="2025-12-11 08:28:33 +0000 UTC" firstStartedPulling="2025-12-11 08:28:36.823961386 +0000 UTC m=+765.201330083" lastFinishedPulling="2025-12-11 08:28:44.950653048 +0000 UTC m=+773.328021745" observedRunningTime="2025-12-11 08:28:45.617354847 +0000 UTC m=+773.994723554" watchObservedRunningTime="2025-12-11 08:28:45.622109189 +0000 UTC m=+773.999477906"
Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.317920 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5"]
Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.319863 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5"
Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.324917 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-ca-bundle"
Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.325149 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-grpc"
Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.325320 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-http"
Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.325997 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-dockercfg-h8cv7"
Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.326239 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-config"
Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.330853 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5"]
Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.431472 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/3d172162-6309-4035-b574-842fa40d6db6-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-7wtq5\" (UID: \"3d172162-6309-4035-b574-842fa40d6db6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5"
Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.431775 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mfk2\" (UniqueName: \"kubernetes.io/projected/3d172162-6309-4035-b574-842fa40d6db6-kube-api-access-6mfk2\") pod \"logging-loki-distributor-76cc67bf56-7wtq5\" (UID: \"3d172162-6309-4035-b574-842fa40d6db6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5"
Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.431821 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/3d172162-6309-4035-b574-842fa40d6db6-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-7wtq5\" (UID: \"3d172162-6309-4035-b574-842fa40d6db6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5"
Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.431844 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d172162-6309-4035-b574-842fa40d6db6-config\") pod \"logging-loki-distributor-76cc67bf56-7wtq5\" (UID: \"3d172162-6309-4035-b574-842fa40d6db6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5"
Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.431908 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3d172162-6309-4035-b574-842fa40d6db6-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-7wtq5\" (UID: \"3d172162-6309-4035-b574-842fa40d6db6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5"
Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.533145 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/3d172162-6309-4035-b574-842fa40d6db6-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-7wtq5\" (UID: \"3d172162-6309-4035-b574-842fa40d6db6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5"
"operationExecutor.MountVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/3d172162-6309-4035-b574-842fa40d6db6-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-7wtq5\" (UID: \"3d172162-6309-4035-b574-842fa40d6db6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.533198 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d172162-6309-4035-b574-842fa40d6db6-config\") pod \"logging-loki-distributor-76cc67bf56-7wtq5\" (UID: \"3d172162-6309-4035-b574-842fa40d6db6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.534202 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d172162-6309-4035-b574-842fa40d6db6-config\") pod \"logging-loki-distributor-76cc67bf56-7wtq5\" (UID: \"3d172162-6309-4035-b574-842fa40d6db6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.534245 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3d172162-6309-4035-b574-842fa40d6db6-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-7wtq5\" (UID: \"3d172162-6309-4035-b574-842fa40d6db6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.534308 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/3d172162-6309-4035-b574-842fa40d6db6-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-7wtq5\" (UID: \"3d172162-6309-4035-b574-842fa40d6db6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.534306 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3d172162-6309-4035-b574-842fa40d6db6-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-7wtq5\" (UID: \"3d172162-6309-4035-b574-842fa40d6db6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.534328 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mfk2\" (UniqueName: \"kubernetes.io/projected/3d172162-6309-4035-b574-842fa40d6db6-kube-api-access-6mfk2\") pod \"logging-loki-distributor-76cc67bf56-7wtq5\" (UID: \"3d172162-6309-4035-b574-842fa40d6db6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.540110 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/3d172162-6309-4035-b574-842fa40d6db6-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-7wtq5\" (UID: \"3d172162-6309-4035-b574-842fa40d6db6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.543606 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-http\" (UniqueName: 
\"kubernetes.io/secret/3d172162-6309-4035-b574-842fa40d6db6-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-7wtq5\" (UID: \"3d172162-6309-4035-b574-842fa40d6db6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.574506 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mfk2\" (UniqueName: \"kubernetes.io/projected/3d172162-6309-4035-b574-842fa40d6db6-kube-api-access-6mfk2\") pod \"logging-loki-distributor-76cc67bf56-7wtq5\" (UID: \"3d172162-6309-4035-b574-842fa40d6db6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.599193 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-87hfb"] Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.599974 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.615205 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.615494 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.660181 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.722098 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-87hfb"] Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.746916 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.747208 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/b4ff84df-3a3b-4346-84a2-56f79c1aac44-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.747287 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4ff84df-3a3b-4346-84a2-56f79c1aac44-config\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.747321 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b4ff84df-3a3b-4346-84a2-56f79c1aac44-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.747368 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzgw6\" (UniqueName: \"kubernetes.io/projected/b4ff84df-3a3b-4346-84a2-56f79c1aac44-kube-api-access-fzgw6\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.747387 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/b4ff84df-3a3b-4346-84a2-56f79c1aac44-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.747425 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/b4ff84df-3a3b-4346-84a2-56f79c1aac44-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.849541 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4ff84df-3a3b-4346-84a2-56f79c1aac44-config\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.849608 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b4ff84df-3a3b-4346-84a2-56f79c1aac44-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " 
pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.849640 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzgw6\" (UniqueName: \"kubernetes.io/projected/b4ff84df-3a3b-4346-84a2-56f79c1aac44-kube-api-access-fzgw6\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.849664 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/b4ff84df-3a3b-4346-84a2-56f79c1aac44-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.849703 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/b4ff84df-3a3b-4346-84a2-56f79c1aac44-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.849731 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/b4ff84df-3a3b-4346-84a2-56f79c1aac44-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.853985 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/b4ff84df-3a3b-4346-84a2-56f79c1aac44-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.854824 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4ff84df-3a3b-4346-84a2-56f79c1aac44-config\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.855547 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b4ff84df-3a3b-4346-84a2-56f79c1aac44-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.879877 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/b4ff84df-3a3b-4346-84a2-56f79c1aac44-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.880391 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/b4ff84df-3a3b-4346-84a2-56f79c1aac44-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.911535 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzgw6\" (UniqueName: \"kubernetes.io/projected/b4ff84df-3a3b-4346-84a2-56f79c1aac44-kube-api-access-fzgw6\") pod \"logging-loki-querier-5895d59bb8-87hfb\" (UID: \"b4ff84df-3a3b-4346-84a2-56f79c1aac44\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:49 crc kubenswrapper[4881]: I1211 08:28:49.940761 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.008404 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft"] Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.009595 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.016276 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-http" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.016434 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-grpc" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.046839 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft"] Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.146551 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-5f65744c89-tvqmf"] Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.147924 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.157546 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.157945 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway-ca-bundle" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.158470 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-http" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.158727 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-client-http" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.160057 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.167308 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-sswft\" (UID: \"b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.167476 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3-config\") pod \"logging-loki-query-frontend-84558f7c9f-sswft\" (UID: \"b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.167563 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-sswft\" (UID: \"b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.167605 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mvsd\" (UniqueName: \"kubernetes.io/projected/b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3-kube-api-access-7mvsd\") pod \"logging-loki-query-frontend-84558f7c9f-sswft\" (UID: \"b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.167646 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-sswft\" (UID: \"b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.240934 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-5f65744c89-srnq9"] Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.267112 4881 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.269075 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/166ecd73-e9b9-4aa0-b09c-7ad373aea239-lokistack-gateway\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.269119 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-sswft\" (UID: \"b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.269137 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/166ecd73-e9b9-4aa0-b09c-7ad373aea239-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.269163 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/166ecd73-e9b9-4aa0-b09c-7ad373aea239-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.269310 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/166ecd73-e9b9-4aa0-b09c-7ad373aea239-tenants\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.269440 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-sswft\" (UID: \"b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.269493 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/166ecd73-e9b9-4aa0-b09c-7ad373aea239-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.269606 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3-config\") pod 
\"logging-loki-query-frontend-84558f7c9f-sswft\" (UID: \"b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.269646 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrmwj\" (UniqueName: \"kubernetes.io/projected/166ecd73-e9b9-4aa0-b09c-7ad373aea239-kube-api-access-qrmwj\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.269667 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/166ecd73-e9b9-4aa0-b09c-7ad373aea239-tls-secret\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.269689 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/166ecd73-e9b9-4aa0-b09c-7ad373aea239-rbac\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.269737 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-sswft\" (UID: \"b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.269780 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mvsd\" (UniqueName: \"kubernetes.io/projected/b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3-kube-api-access-7mvsd\") pod \"logging-loki-query-frontend-84558f7c9f-sswft\" (UID: \"b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.270257 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-sswft\" (UID: \"b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.271115 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-dockercfg-crqbf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.271823 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3-config\") pod \"logging-loki-query-frontend-84558f7c9f-sswft\" (UID: \"b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.278557 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-logging/logging-loki-gateway-5f65744c89-tvqmf"] Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.280648 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-sswft\" (UID: \"b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.280747 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-sswft\" (UID: \"b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.283068 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-5f65744c89-srnq9"] Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.288823 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mvsd\" (UniqueName: \"kubernetes.io/projected/b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3-kube-api-access-7mvsd\") pod \"logging-loki-query-frontend-84558f7c9f-sswft\" (UID: \"b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.371703 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/166ecd73-e9b9-4aa0-b09c-7ad373aea239-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.371792 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3470b561-e428-417b-bd76-92642ba561d8-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.371829 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/3470b561-e428-417b-bd76-92642ba561d8-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.371852 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrmwj\" (UniqueName: \"kubernetes.io/projected/166ecd73-e9b9-4aa0-b09c-7ad373aea239-kube-api-access-qrmwj\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.371875 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: 
\"kubernetes.io/secret/166ecd73-e9b9-4aa0-b09c-7ad373aea239-tls-secret\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.371896 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/166ecd73-e9b9-4aa0-b09c-7ad373aea239-rbac\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.371918 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94klc\" (UniqueName: \"kubernetes.io/projected/3470b561-e428-417b-bd76-92642ba561d8-kube-api-access-94klc\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.371951 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/3470b561-e428-417b-bd76-92642ba561d8-lokistack-gateway\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.371981 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/3470b561-e428-417b-bd76-92642ba561d8-rbac\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.372002 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/166ecd73-e9b9-4aa0-b09c-7ad373aea239-lokistack-gateway\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.372026 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/166ecd73-e9b9-4aa0-b09c-7ad373aea239-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.372052 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/166ecd73-e9b9-4aa0-b09c-7ad373aea239-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.372072 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3470b561-e428-417b-bd76-92642ba561d8-logging-loki-gateway-ca-bundle\") 
pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.372113 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/3470b561-e428-417b-bd76-92642ba561d8-tls-secret\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.372135 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/166ecd73-e9b9-4aa0-b09c-7ad373aea239-tenants\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.372157 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/3470b561-e428-417b-bd76-92642ba561d8-tenants\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.372882 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.373297 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/166ecd73-e9b9-4aa0-b09c-7ad373aea239-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.375328 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/166ecd73-e9b9-4aa0-b09c-7ad373aea239-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.376112 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/166ecd73-e9b9-4aa0-b09c-7ad373aea239-lokistack-gateway\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.376950 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/166ecd73-e9b9-4aa0-b09c-7ad373aea239-rbac\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.381173 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/166ecd73-e9b9-4aa0-b09c-7ad373aea239-logging-loki-gateway-client-http\") pod 
\"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.383936 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/166ecd73-e9b9-4aa0-b09c-7ad373aea239-tls-secret\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.384787 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/166ecd73-e9b9-4aa0-b09c-7ad373aea239-tenants\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.394750 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrmwj\" (UniqueName: \"kubernetes.io/projected/166ecd73-e9b9-4aa0-b09c-7ad373aea239-kube-api-access-qrmwj\") pod \"logging-loki-gateway-5f65744c89-tvqmf\" (UID: \"166ecd73-e9b9-4aa0-b09c-7ad373aea239\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.474117 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3470b561-e428-417b-bd76-92642ba561d8-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.474191 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/3470b561-e428-417b-bd76-92642ba561d8-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.474228 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94klc\" (UniqueName: \"kubernetes.io/projected/3470b561-e428-417b-bd76-92642ba561d8-kube-api-access-94klc\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.474259 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/3470b561-e428-417b-bd76-92642ba561d8-lokistack-gateway\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.474411 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/3470b561-e428-417b-bd76-92642ba561d8-rbac\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.474455 4881 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3470b561-e428-417b-bd76-92642ba561d8-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.474492 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/3470b561-e428-417b-bd76-92642ba561d8-tls-secret\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.474518 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/3470b561-e428-417b-bd76-92642ba561d8-tenants\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.475734 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3470b561-e428-417b-bd76-92642ba561d8-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.477197 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/3470b561-e428-417b-bd76-92642ba561d8-rbac\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.477526 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3470b561-e428-417b-bd76-92642ba561d8-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.477959 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/3470b561-e428-417b-bd76-92642ba561d8-tenants\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.478209 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/3470b561-e428-417b-bd76-92642ba561d8-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.479046 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/3470b561-e428-417b-bd76-92642ba561d8-tls-secret\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: 
\"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.480091 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/3470b561-e428-417b-bd76-92642ba561d8-lokistack-gateway\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.492633 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94klc\" (UniqueName: \"kubernetes.io/projected/3470b561-e428-417b-bd76-92642ba561d8-kube-api-access-94klc\") pod \"logging-loki-gateway-5f65744c89-srnq9\" (UID: \"3470b561-e428-417b-bd76-92642ba561d8\") " pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.536467 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.560463 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.564238 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.568622 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-http" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.568864 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-grpc" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.580294 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.603776 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.609917 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5"] Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.683383 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d024a448-9b07-40ca-877b-66d536073f5c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d024a448-9b07-40ca-877b-66d536073f5c\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.683455 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/76043698-27ae-4a6d-af81-a7da0a14902d-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.683489 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76043698-27ae-4a6d-af81-a7da0a14902d-config\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.683516 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-527wk\" (UniqueName: \"kubernetes.io/projected/76043698-27ae-4a6d-af81-a7da0a14902d-kube-api-access-527wk\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.683573 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-1cf9e97d-991d-4e04-89fe-2abbf812f995\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1cf9e97d-991d-4e04-89fe-2abbf812f995\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.683606 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/76043698-27ae-4a6d-af81-a7da0a14902d-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.683625 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/76043698-27ae-4a6d-af81-a7da0a14902d-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.683683 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/76043698-27ae-4a6d-af81-a7da0a14902d-logging-loki-ingester-grpc\") pod 
\"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.722099 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5" event={"ID":"3d172162-6309-4035-b574-842fa40d6db6","Type":"ContainerStarted","Data":"e0ba608bd0a9661536a544c7aa0f45d90d1c128570f79b224780798e509b6b2c"} Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.737898 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-87hfb"] Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.787395 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/76043698-27ae-4a6d-af81-a7da0a14902d-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.787474 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d024a448-9b07-40ca-877b-66d536073f5c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d024a448-9b07-40ca-877b-66d536073f5c\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.787520 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/76043698-27ae-4a6d-af81-a7da0a14902d-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.787558 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76043698-27ae-4a6d-af81-a7da0a14902d-config\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.787586 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-527wk\" (UniqueName: \"kubernetes.io/projected/76043698-27ae-4a6d-af81-a7da0a14902d-kube-api-access-527wk\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.788130 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-1cf9e97d-991d-4e04-89fe-2abbf812f995\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1cf9e97d-991d-4e04-89fe-2abbf812f995\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.788184 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/76043698-27ae-4a6d-af81-a7da0a14902d-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.788206 4881 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/76043698-27ae-4a6d-af81-a7da0a14902d-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.789258 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/76043698-27ae-4a6d-af81-a7da0a14902d-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.790610 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76043698-27ae-4a6d-af81-a7da0a14902d-config\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.792828 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/76043698-27ae-4a6d-af81-a7da0a14902d-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.793359 4881 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.793379 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d024a448-9b07-40ca-877b-66d536073f5c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d024a448-9b07-40ca-877b-66d536073f5c\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0c3f59302bbe6381adcaabeafc68b18c95542a4f168f3f4d65e354046d543bc2/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.794009 4881 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.794027 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-1cf9e97d-991d-4e04-89fe-2abbf812f995\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1cf9e97d-991d-4e04-89fe-2abbf812f995\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/2c995a906818ef257d8b81a5b0a46abeffe9ccc46eaef53d8e9c4d7eb3e3b362/globalmount\"" pod="openshift-logging/logging-loki-ingester-0"
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.797505 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/76043698-27ae-4a6d-af81-a7da0a14902d-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0"
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.811227 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/76043698-27ae-4a6d-af81-a7da0a14902d-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0"
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.814045 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-527wk\" (UniqueName: \"kubernetes.io/projected/76043698-27ae-4a6d-af81-a7da0a14902d-kube-api-access-527wk\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0"
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.842314 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft"]
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.851004 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-1cf9e97d-991d-4e04-89fe-2abbf812f995\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1cf9e97d-991d-4e04-89fe-2abbf812f995\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0"
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.868103 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d024a448-9b07-40ca-877b-66d536073f5c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d024a448-9b07-40ca-877b-66d536073f5c\") pod \"logging-loki-ingester-0\" (UID: \"76043698-27ae-4a6d-af81-a7da0a14902d\") " pod="openshift-logging/logging-loki-ingester-0"
Dec 11 08:28:50 crc kubenswrapper[4881]: W1211 08:28:50.873100 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4e3cba0_44c9_46ee_97f1_fb3b34b37cb3.slice/crio-d31cdd12ac3de57442445bfb8533fdc9b120d9681aaa0fdb9bcfab6f143ec3c4 WatchSource:0}: Error finding container d31cdd12ac3de57442445bfb8533fdc9b120d9681aaa0fdb9bcfab6f143ec3c4: Status 404 returned error can't find the container with id d31cdd12ac3de57442445bfb8533fdc9b120d9681aaa0fdb9bcfab6f143ec3c4
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.919889 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-compactor-0"]
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.920891 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.923169 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-http"
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.923889 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-grpc"
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.933738 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"]
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.990821 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/19c28f0c-c6b1-4192-b769-35ce88232323-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.990882 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/19c28f0c-c6b1-4192-b769-35ce88232323-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.990939 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbfks\" (UniqueName: \"kubernetes.io/projected/19c28f0c-c6b1-4192-b769-35ce88232323-kube-api-access-qbfks\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.990973 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-08a956d4-8c1c-4604-9da8-2e46534ca8c9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-08a956d4-8c1c-4604-9da8-2e46534ca8c9\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.991161 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19c28f0c-c6b1-4192-b769-35ce88232323-config\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.991425 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/19c28f0c-c6b1-4192-b769-35ce88232323-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:50 crc kubenswrapper[4881]: I1211 08:28:50.991476 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/19c28f0c-c6b1-4192-b769-35ce88232323-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.035557 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.081643 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"]
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.083614 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.088567 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-http"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.090068 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-grpc"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.091136 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"]
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.096755 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/19c28f0c-c6b1-4192-b769-35ce88232323-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.096812 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/19c28f0c-c6b1-4192-b769-35ce88232323-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.096877 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbfks\" (UniqueName: \"kubernetes.io/projected/19c28f0c-c6b1-4192-b769-35ce88232323-kube-api-access-qbfks\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.096919 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-08a956d4-8c1c-4604-9da8-2e46534ca8c9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-08a956d4-8c1c-4604-9da8-2e46534ca8c9\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.096979 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19c28f0c-c6b1-4192-b769-35ce88232323-config\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.097038 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/19c28f0c-c6b1-4192-b769-35ce88232323-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.097068 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/19c28f0c-c6b1-4192-b769-35ce88232323-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.097884 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/19c28f0c-c6b1-4192-b769-35ce88232323-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.098542 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19c28f0c-c6b1-4192-b769-35ce88232323-config\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.102951 4881 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.103014 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-08a956d4-8c1c-4604-9da8-2e46534ca8c9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-08a956d4-8c1c-4604-9da8-2e46534ca8c9\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/734c2c6326ea8392cede66dfc4278e22a40ee6be2003f05f3c2c40c6e5aeeeaf/globalmount\"" pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.103132 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/19c28f0c-c6b1-4192-b769-35ce88232323-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.103820 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/19c28f0c-c6b1-4192-b769-35ce88232323-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.104885 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/19c28f0c-c6b1-4192-b769-35ce88232323-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.118769 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbfks\" (UniqueName: \"kubernetes.io/projected/19c28f0c-c6b1-4192-b769-35ce88232323-kube-api-access-qbfks\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.141883 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-08a956d4-8c1c-4604-9da8-2e46534ca8c9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-08a956d4-8c1c-4604-9da8-2e46534ca8c9\") pod \"logging-loki-compactor-0\" (UID: \"19c28f0c-c6b1-4192-b769-35ce88232323\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.197261 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-5f65744c89-srnq9"]
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.198053 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-44158878-8251-4525-b3b4-95f8ab1af9e1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-44158878-8251-4525-b3b4-95f8ab1af9e1\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.198084 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.198105 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.198129 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-config\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.198204 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.198306 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gnsv\" (UniqueName: \"kubernetes.io/projected/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-kube-api-access-8gnsv\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.198363 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: W1211 08:28:51.204153 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3470b561_e428_417b_bd76_92642ba561d8.slice/crio-44c6e06bcd3a196ceaac89ee0094ed1ca8a47616430265702175d6e1f99921c3 WatchSource:0}: Error finding container 44c6e06bcd3a196ceaac89ee0094ed1ca8a47616430265702175d6e1f99921c3: Status 404 returned error can't find the container with id 44c6e06bcd3a196ceaac89ee0094ed1ca8a47616430265702175d6e1f99921c3
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.240689 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.250183 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-5f65744c89-tvqmf"]
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.299651 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-44158878-8251-4525-b3b4-95f8ab1af9e1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-44158878-8251-4525-b3b4-95f8ab1af9e1\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.299705 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.299731 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.299761 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-config\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.299788 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.299837 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gnsv\" (UniqueName: \"kubernetes.io/projected/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-kube-api-access-8gnsv\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.299871 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.300880 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.303071 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-config\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.303497 4881 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.303525 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-44158878-8251-4525-b3b4-95f8ab1af9e1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-44158878-8251-4525-b3b4-95f8ab1af9e1\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/953ea72279156cc32ce4ec387e0b834befe85de28058ed139b783419d3d707fa/globalmount\"" pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.304832 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.305016 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.305247 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.319696 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gnsv\" (UniqueName: \"kubernetes.io/projected/7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b-kube-api-access-8gnsv\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.339913 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-44158878-8251-4525-b3b4-95f8ab1af9e1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-44158878-8251-4525-b3b4-95f8ab1af9e1\") pod \"logging-loki-index-gateway-0\" (UID: \"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.404842 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.450070 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"]
Dec 11 08:28:51 crc kubenswrapper[4881]: W1211 08:28:51.453592 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod76043698_27ae_4a6d_af81_a7da0a14902d.slice/crio-df2d944828b878f9ed824aa2b1f8756914b862a76f0524df9da14d1cd71264b7 WatchSource:0}: Error finding container df2d944828b878f9ed824aa2b1f8756914b862a76f0524df9da14d1cd71264b7: Status 404 returned error can't find the container with id df2d944828b878f9ed824aa2b1f8756914b862a76f0524df9da14d1cd71264b7
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.642238 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"]
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.734743 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" event={"ID":"3470b561-e428-417b-bd76-92642ba561d8","Type":"ContainerStarted","Data":"44c6e06bcd3a196ceaac89ee0094ed1ca8a47616430265702175d6e1f99921c3"}
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.736271 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" event={"ID":"b4ff84df-3a3b-4346-84a2-56f79c1aac44","Type":"ContainerStarted","Data":"876eb983f850c0c5fbefbccc78c1d9010b6a0ab73c33f46309039f2ba191fbd2"}
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.737916 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"76043698-27ae-4a6d-af81-a7da0a14902d","Type":"ContainerStarted","Data":"df2d944828b878f9ed824aa2b1f8756914b862a76f0524df9da14d1cd71264b7"}
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.738856 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"19c28f0c-c6b1-4192-b769-35ce88232323","Type":"ContainerStarted","Data":"7a8a6399ed1961ddb08b58ff762fdd7748025b7eb6bcab66ebb9e07eb3d54378"}
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.739692 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" event={"ID":"b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3","Type":"ContainerStarted","Data":"d31cdd12ac3de57442445bfb8533fdc9b120d9681aaa0fdb9bcfab6f143ec3c4"}
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.746949 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" event={"ID":"166ecd73-e9b9-4aa0-b09c-7ad373aea239","Type":"ContainerStarted","Data":"580c32552f7b68b57e93bddb4a62a2276467812bf9ed23244651703a4ce0d384"}
Dec 11 08:28:51 crc kubenswrapper[4881]: I1211 08:28:51.821731 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"]
Dec 11 08:28:52 crc kubenswrapper[4881]: I1211 08:28:52.755418 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b","Type":"ContainerStarted","Data":"de8372b6fc236ebbc3baa2404b5f32d6337ed9a0dbb41bbe4074c6535f8502fc"}
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.784823 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" event={"ID":"166ecd73-e9b9-4aa0-b09c-7ad373aea239","Type":"ContainerStarted","Data":"7a5d3fdee57cec68cb22e408f05cd9e463a8fa299aead0de1b7ee2b9742a25ae"}
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.787562 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" event={"ID":"3470b561-e428-417b-bd76-92642ba561d8","Type":"ContainerStarted","Data":"50f9200a8dbe1a1fb7a6a58f8fb8e98d1935183dbc6ade56a1a835f517814919"}
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.789575 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" event={"ID":"b4ff84df-3a3b-4346-84a2-56f79c1aac44","Type":"ContainerStarted","Data":"b1c3fa3a2a4c8126347adf313d962643a1369705d8430bef3d9fb6f2d54076fe"}
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.789860 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb"
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.791410 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"76043698-27ae-4a6d-af81-a7da0a14902d","Type":"ContainerStarted","Data":"ace84e67020f747d8160ce56aae0af984b7145cddb5f4dd55356acb667c79b19"}
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.791589 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-ingester-0"
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.795024 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"19c28f0c-c6b1-4192-b769-35ce88232323","Type":"ContainerStarted","Data":"589fa6fa0c54b5a5ae717ac2551d289f17fb022d2b846bc09b972d632f51af93"}
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.795983 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.797942 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" event={"ID":"b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3","Type":"ContainerStarted","Data":"6c6d39ed7c2ee3e289be60a0f36f19c3f77488fa736caa3ad5741b94cb72625d"}
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.798146 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft"
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.799503 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5" event={"ID":"3d172162-6309-4035-b574-842fa40d6db6","Type":"ContainerStarted","Data":"a487df232b1e9b3d82551428c104f2488a6422d5bb7f87e0271e18d2c7feb1c3"}
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.799592 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5"
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.800931 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b","Type":"ContainerStarted","Data":"beeb2c9191515cad83c4190d3351e375d5d38c0b8d6f6e163149e11f0706b403"}
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.801432 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.819515 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb" podStartSLOduration=2.600733063 podStartE2EDuration="6.819482636s" podCreationTimestamp="2025-12-11 08:28:49 +0000 UTC" firstStartedPulling="2025-12-11 08:28:50.769296014 +0000 UTC m=+779.146664711" lastFinishedPulling="2025-12-11 08:28:54.988045567 +0000 UTC m=+783.365414284" observedRunningTime="2025-12-11 08:28:55.81258136 +0000 UTC m=+784.189950057" watchObservedRunningTime="2025-12-11 08:28:55.819482636 +0000 UTC m=+784.196851353"
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.837164 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-index-gateway-0" podStartSLOduration=2.67810244 podStartE2EDuration="5.837148176s" podCreationTimestamp="2025-12-11 08:28:50 +0000 UTC" firstStartedPulling="2025-12-11 08:28:51.832891041 +0000 UTC m=+780.210259738" lastFinishedPulling="2025-12-11 08:28:54.991936767 +0000 UTC m=+783.369305474" observedRunningTime="2025-12-11 08:28:55.835027683 +0000 UTC m=+784.212396380" watchObservedRunningTime="2025-12-11 08:28:55.837148176 +0000 UTC m=+784.214516873"
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.863118 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5" podStartSLOduration=2.5660568379999997 podStartE2EDuration="6.863097148s" podCreationTimestamp="2025-12-11 08:28:49 +0000 UTC" firstStartedPulling="2025-12-11 08:28:50.684708526 +0000 UTC m=+779.062077223" lastFinishedPulling="2025-12-11 08:28:54.981748816 +0000 UTC m=+783.359117533" observedRunningTime="2025-12-11 08:28:55.860980724 +0000 UTC m=+784.238349431" watchObservedRunningTime="2025-12-11 08:28:55.863097148 +0000 UTC m=+784.240465845"
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.882346 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-ingester-0" podStartSLOduration=3.263866438 podStartE2EDuration="6.882313627s" podCreationTimestamp="2025-12-11 08:28:49 +0000 UTC" firstStartedPulling="2025-12-11 08:28:51.456161346 +0000 UTC m=+779.833530033" lastFinishedPulling="2025-12-11 08:28:55.074608525 +0000 UTC m=+783.451977222" observedRunningTime="2025-12-11 08:28:55.880518512 +0000 UTC m=+784.257887209" watchObservedRunningTime="2025-12-11 08:28:55.882313627 +0000 UTC m=+784.259682324"
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.911654 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-compactor-0" podStartSLOduration=3.6629056540000002 podStartE2EDuration="6.911638085s" podCreationTimestamp="2025-12-11 08:28:49 +0000 UTC" firstStartedPulling="2025-12-11 08:28:51.652742369 +0000 UTC m=+780.030111066" lastFinishedPulling="2025-12-11 08:28:54.90147478 +0000 UTC m=+783.278843497" observedRunningTime="2025-12-11 08:28:55.906079553 +0000 UTC m=+784.283448260" watchObservedRunningTime="2025-12-11 08:28:55.911638085 +0000 UTC m=+784.289006782"
Dec 11 08:28:55 crc kubenswrapper[4881]: I1211 08:28:55.927957 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft" podStartSLOduration=2.72692307 podStartE2EDuration="6.927937891s" podCreationTimestamp="2025-12-11 08:28:49 +0000 UTC" firstStartedPulling="2025-12-11 08:28:50.875591484 +0000 UTC m=+779.252960181" lastFinishedPulling="2025-12-11 08:28:55.076606305 +0000 UTC m=+783.453975002" observedRunningTime="2025-12-11 08:28:55.923211141 +0000 UTC m=+784.300579838" watchObservedRunningTime="2025-12-11 08:28:55.927937891 +0000 UTC m=+784.305306578"
Dec 11 08:28:57 crc kubenswrapper[4881]: I1211 08:28:57.831281 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" event={"ID":"166ecd73-e9b9-4aa0-b09c-7ad373aea239","Type":"ContainerStarted","Data":"bcdc84b5a0a7e0f5f008440ca8bf432e317b5f976810780f4a2e0651d192b217"}
Dec 11 08:28:57 crc kubenswrapper[4881]: I1211 08:28:57.831649 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf"
Dec 11 08:28:57 crc kubenswrapper[4881]: I1211 08:28:57.834847 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" event={"ID":"3470b561-e428-417b-bd76-92642ba561d8","Type":"ContainerStarted","Data":"90535474252e64e1e1f4d972700c157dbffb9c8ece483108e14357c3dead19a0"}
Dec 11 08:28:57 crc kubenswrapper[4881]: I1211 08:28:57.834951 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9"
Dec 11 08:28:57 crc kubenswrapper[4881]: I1211 08:28:57.835320 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9"
Dec 11 08:28:57 crc kubenswrapper[4881]: I1211 08:28:57.843189 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9"
Dec 11 08:28:57 crc kubenswrapper[4881]: I1211 08:28:57.846274 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf"
Dec 11 08:28:57 crc kubenswrapper[4881]: I1211 08:28:57.856893 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf" podStartSLOduration=1.721932233 podStartE2EDuration="7.856872633s" podCreationTimestamp="2025-12-11 08:28:50 +0000 UTC" firstStartedPulling="2025-12-11 08:28:51.252699219 +0000 UTC m=+779.630067916" lastFinishedPulling="2025-12-11 08:28:57.387639619 +0000 UTC m=+785.765008316" observedRunningTime="2025-12-11 08:28:57.850536601 +0000 UTC m=+786.227905308" watchObservedRunningTime="2025-12-11 08:28:57.856872633 +0000 UTC m=+786.234241340"
Dec 11 08:28:57 crc kubenswrapper[4881]: I1211 08:28:57.857166 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9"
Dec 11 08:28:57 crc kubenswrapper[4881]: I1211 08:28:57.923051 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-5f65744c89-srnq9" podStartSLOduration=1.735745074 podStartE2EDuration="7.923017469s" podCreationTimestamp="2025-12-11 08:28:50 +0000 UTC" firstStartedPulling="2025-12-11 08:28:51.20689619 +0000 UTC m=+779.584264887" lastFinishedPulling="2025-12-11 08:28:57.394168585 +0000 UTC m=+785.771537282" observedRunningTime="2025-12-11 08:28:57.920297519 +0000 UTC m=+786.297666216" watchObservedRunningTime="2025-12-11 08:28:57.923017469 +0000 UTC m=+786.300386166"
Dec 11 08:28:58 crc kubenswrapper[4881]: I1211 08:28:58.846668 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf"
Dec 11 08:28:58 crc kubenswrapper[4881]: I1211 08:28:58.864573 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-5f65744c89-tvqmf"
Dec 11 08:28:59 crc kubenswrapper[4881]: I1211 08:28:59.397124 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 08:28:59 crc kubenswrapper[4881]: I1211 08:28:59.397211 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 08:29:10 crc kubenswrapper[4881]: I1211 08:29:10.380994 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-sswft"
Dec 11 08:29:11 crc kubenswrapper[4881]: I1211 08:29:11.045442 4881 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens
Dec 11 08:29:11 crc kubenswrapper[4881]: I1211 08:29:11.045819 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="76043698-27ae-4a6d-af81-a7da0a14902d" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Dec 11 08:29:11 crc kubenswrapper[4881]: I1211 08:29:11.248035 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-compactor-0"
Dec 11 08:29:11 crc kubenswrapper[4881]: I1211 08:29:11.411755 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-index-gateway-0"
Dec 11 08:29:19 crc kubenswrapper[4881]: I1211 08:29:19.760017 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-distributor-76cc67bf56-7wtq5"
Dec 11 08:29:19 crc kubenswrapper[4881]: I1211 08:29:19.952235 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-querier-5895d59bb8-87hfb"
Dec 11 08:29:21 crc kubenswrapper[4881]: I1211 08:29:21.041001 4881 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens
Dec 11 08:29:21 crc kubenswrapper[4881]: I1211 08:29:21.041078 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="76043698-27ae-4a6d-af81-a7da0a14902d" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Dec 11 08:29:26 crc kubenswrapper[4881]: I1211 08:29:26.968079 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-snbs6"]
Dec 11 08:29:26 crc kubenswrapper[4881]: I1211 08:29:26.970538 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-snbs6"
Dec 11 08:29:26 crc kubenswrapper[4881]: I1211 08:29:26.977915 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-snbs6"]
Dec 11 08:29:27 crc kubenswrapper[4881]: I1211 08:29:27.163964 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-catalog-content\") pod \"community-operators-snbs6\" (UID: \"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1\") " pod="openshift-marketplace/community-operators-snbs6"
Dec 11 08:29:27 crc kubenswrapper[4881]: I1211 08:29:27.164143 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmfrf\" (UniqueName: \"kubernetes.io/projected/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-kube-api-access-mmfrf\") pod \"community-operators-snbs6\" (UID: \"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1\") " pod="openshift-marketplace/community-operators-snbs6"
Dec 11 08:29:27 crc kubenswrapper[4881]: I1211 08:29:27.164196 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-utilities\") pod \"community-operators-snbs6\" (UID: \"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1\") " pod="openshift-marketplace/community-operators-snbs6"
Dec 11 08:29:27 crc kubenswrapper[4881]: I1211 08:29:27.265326 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-catalog-content\") pod \"community-operators-snbs6\" (UID: \"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1\") " pod="openshift-marketplace/community-operators-snbs6"
Dec 11 08:29:27 crc kubenswrapper[4881]: I1211 08:29:27.265397 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmfrf\" (UniqueName: \"kubernetes.io/projected/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-kube-api-access-mmfrf\") pod \"community-operators-snbs6\" (UID: \"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1\") " pod="openshift-marketplace/community-operators-snbs6"
Dec 11 08:29:27 crc kubenswrapper[4881]: I1211 08:29:27.265423 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-utilities\") pod \"community-operators-snbs6\" (UID: \"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1\") " pod="openshift-marketplace/community-operators-snbs6"
Dec 11 08:29:27 crc kubenswrapper[4881]: I1211 08:29:27.265758 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-catalog-content\") pod \"community-operators-snbs6\" (UID: \"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1\") " pod="openshift-marketplace/community-operators-snbs6"
Dec 11 08:29:27 crc kubenswrapper[4881]: I1211 08:29:27.265866 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-utilities\") pod \"community-operators-snbs6\" (UID: \"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1\") " pod="openshift-marketplace/community-operators-snbs6"
Dec 11 08:29:27 crc kubenswrapper[4881]: I1211 08:29:27.294458 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmfrf\" (UniqueName: \"kubernetes.io/projected/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-kube-api-access-mmfrf\") pod \"community-operators-snbs6\" (UID: \"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1\") " pod="openshift-marketplace/community-operators-snbs6"
Dec 11 08:29:27 crc kubenswrapper[4881]: I1211 08:29:27.297065 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-snbs6"
Dec 11 08:29:27 crc kubenswrapper[4881]: I1211 08:29:27.809303 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-snbs6"]
Dec 11 08:29:28 crc kubenswrapper[4881]: I1211 08:29:28.075431 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-snbs6" event={"ID":"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1","Type":"ContainerStarted","Data":"12c39bc2d529f43a5a3f3fef6ed96b693108660ce43949985daf2026a57511eb"}
Dec 11 08:29:28 crc kubenswrapper[4881]: I1211 08:29:28.075765 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-snbs6" event={"ID":"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1","Type":"ContainerStarted","Data":"ca908fc14ae42101dc6a5d20d4b5cc994e5a3033bd8bef639b9fa4b3377505fb"}
Dec 11 08:29:29 crc kubenswrapper[4881]: I1211 08:29:29.083876 4881 generic.go:334] "Generic (PLEG): container finished" podID="c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1" containerID="12c39bc2d529f43a5a3f3fef6ed96b693108660ce43949985daf2026a57511eb" exitCode=0
Dec 11 08:29:29 crc kubenswrapper[4881]: I1211 08:29:29.084153 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-snbs6" event={"ID":"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1","Type":"ContainerDied","Data":"12c39bc2d529f43a5a3f3fef6ed96b693108660ce43949985daf2026a57511eb"}
Dec 11 08:29:29 crc kubenswrapper[4881]: I1211 08:29:29.084250 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-snbs6" event={"ID":"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1","Type":"ContainerStarted","Data":"132ad161cd994757bb762a5b3bfecda6e6b027b8f608a1e07de49c9b6dcec2ab"}
Dec 11 08:29:29 crc kubenswrapper[4881]: I1211 08:29:29.396777 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 08:29:29 crc kubenswrapper[4881]: I1211 08:29:29.397051 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon"
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:29:30 crc kubenswrapper[4881]: I1211 08:29:30.094621 4881 generic.go:334] "Generic (PLEG): container finished" podID="c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1" containerID="132ad161cd994757bb762a5b3bfecda6e6b027b8f608a1e07de49c9b6dcec2ab" exitCode=0 Dec 11 08:29:30 crc kubenswrapper[4881]: I1211 08:29:30.094673 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-snbs6" event={"ID":"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1","Type":"ContainerDied","Data":"132ad161cd994757bb762a5b3bfecda6e6b027b8f608a1e07de49c9b6dcec2ab"} Dec 11 08:29:31 crc kubenswrapper[4881]: I1211 08:29:31.039831 4881 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Dec 11 08:29:31 crc kubenswrapper[4881]: I1211 08:29:31.040120 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="76043698-27ae-4a6d-af81-a7da0a14902d" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 11 08:29:32 crc kubenswrapper[4881]: I1211 08:29:32.108513 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-snbs6" event={"ID":"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1","Type":"ContainerStarted","Data":"06574c6dc7c166df49eb061b3cdd63ece7e66f7e2a0ce3dd4025b4b8b0786865"} Dec 11 08:29:32 crc kubenswrapper[4881]: I1211 08:29:32.133633 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-snbs6" podStartSLOduration=3.297200477 podStartE2EDuration="6.133612096s" podCreationTimestamp="2025-12-11 08:29:26 +0000 UTC" firstStartedPulling="2025-12-11 08:29:28.07753844 +0000 UTC m=+816.454907147" lastFinishedPulling="2025-12-11 08:29:30.913950049 +0000 UTC m=+819.291318766" observedRunningTime="2025-12-11 08:29:32.131183274 +0000 UTC m=+820.508551981" watchObservedRunningTime="2025-12-11 08:29:32.133612096 +0000 UTC m=+820.510980793" Dec 11 08:29:37 crc kubenswrapper[4881]: I1211 08:29:37.298278 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-snbs6" Dec 11 08:29:37 crc kubenswrapper[4881]: I1211 08:29:37.298692 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-snbs6" Dec 11 08:29:37 crc kubenswrapper[4881]: I1211 08:29:37.360464 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-snbs6" Dec 11 08:29:38 crc kubenswrapper[4881]: I1211 08:29:38.229027 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-snbs6" Dec 11 08:29:38 crc kubenswrapper[4881]: I1211 08:29:38.295015 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-snbs6"] Dec 11 08:29:40 crc kubenswrapper[4881]: I1211 08:29:40.183613 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-snbs6" podUID="c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1" containerName="registry-server" 
containerID="cri-o://06574c6dc7c166df49eb061b3cdd63ece7e66f7e2a0ce3dd4025b4b8b0786865" gracePeriod=2 Dec 11 08:29:41 crc kubenswrapper[4881]: I1211 08:29:41.040419 4881 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Dec 11 08:29:41 crc kubenswrapper[4881]: I1211 08:29:41.040842 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="76043698-27ae-4a6d-af81-a7da0a14902d" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 11 08:29:41 crc kubenswrapper[4881]: I1211 08:29:41.733324 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-snbs6" Dec 11 08:29:41 crc kubenswrapper[4881]: I1211 08:29:41.915671 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmfrf\" (UniqueName: \"kubernetes.io/projected/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-kube-api-access-mmfrf\") pod \"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1\" (UID: \"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1\") " Dec 11 08:29:41 crc kubenswrapper[4881]: I1211 08:29:41.915778 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-utilities\") pod \"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1\" (UID: \"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1\") " Dec 11 08:29:41 crc kubenswrapper[4881]: I1211 08:29:41.915843 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-catalog-content\") pod \"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1\" (UID: \"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1\") " Dec 11 08:29:41 crc kubenswrapper[4881]: I1211 08:29:41.917564 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-utilities" (OuterVolumeSpecName: "utilities") pod "c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1" (UID: "c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:29:41 crc kubenswrapper[4881]: I1211 08:29:41.927404 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-kube-api-access-mmfrf" (OuterVolumeSpecName: "kube-api-access-mmfrf") pod "c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1" (UID: "c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1"). InnerVolumeSpecName "kube-api-access-mmfrf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:29:41 crc kubenswrapper[4881]: I1211 08:29:41.967546 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1" (UID: "c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.018792 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.018853 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmfrf\" (UniqueName: \"kubernetes.io/projected/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-kube-api-access-mmfrf\") on node \"crc\" DevicePath \"\"" Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.018887 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.198688 4881 generic.go:334] "Generic (PLEG): container finished" podID="c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1" containerID="06574c6dc7c166df49eb061b3cdd63ece7e66f7e2a0ce3dd4025b4b8b0786865" exitCode=0 Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.198735 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-snbs6" event={"ID":"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1","Type":"ContainerDied","Data":"06574c6dc7c166df49eb061b3cdd63ece7e66f7e2a0ce3dd4025b4b8b0786865"} Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.198761 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-snbs6" event={"ID":"c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1","Type":"ContainerDied","Data":"ca908fc14ae42101dc6a5d20d4b5cc994e5a3033bd8bef639b9fa4b3377505fb"} Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.198769 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-snbs6" Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.198777 4881 scope.go:117] "RemoveContainer" containerID="06574c6dc7c166df49eb061b3cdd63ece7e66f7e2a0ce3dd4025b4b8b0786865" Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.229554 4881 scope.go:117] "RemoveContainer" containerID="132ad161cd994757bb762a5b3bfecda6e6b027b8f608a1e07de49c9b6dcec2ab" Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.246525 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-snbs6"] Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.251396 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-snbs6"] Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.264600 4881 scope.go:117] "RemoveContainer" containerID="12c39bc2d529f43a5a3f3fef6ed96b693108660ce43949985daf2026a57511eb" Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.284193 4881 scope.go:117] "RemoveContainer" containerID="06574c6dc7c166df49eb061b3cdd63ece7e66f7e2a0ce3dd4025b4b8b0786865" Dec 11 08:29:42 crc kubenswrapper[4881]: E1211 08:29:42.284631 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06574c6dc7c166df49eb061b3cdd63ece7e66f7e2a0ce3dd4025b4b8b0786865\": container with ID starting with 06574c6dc7c166df49eb061b3cdd63ece7e66f7e2a0ce3dd4025b4b8b0786865 not found: ID does not exist" containerID="06574c6dc7c166df49eb061b3cdd63ece7e66f7e2a0ce3dd4025b4b8b0786865" Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.284665 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06574c6dc7c166df49eb061b3cdd63ece7e66f7e2a0ce3dd4025b4b8b0786865"} err="failed to get container status \"06574c6dc7c166df49eb061b3cdd63ece7e66f7e2a0ce3dd4025b4b8b0786865\": rpc error: code = NotFound desc = could not find container \"06574c6dc7c166df49eb061b3cdd63ece7e66f7e2a0ce3dd4025b4b8b0786865\": container with ID starting with 06574c6dc7c166df49eb061b3cdd63ece7e66f7e2a0ce3dd4025b4b8b0786865 not found: ID does not exist" Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.284687 4881 scope.go:117] "RemoveContainer" containerID="132ad161cd994757bb762a5b3bfecda6e6b027b8f608a1e07de49c9b6dcec2ab" Dec 11 08:29:42 crc kubenswrapper[4881]: E1211 08:29:42.284936 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"132ad161cd994757bb762a5b3bfecda6e6b027b8f608a1e07de49c9b6dcec2ab\": container with ID starting with 132ad161cd994757bb762a5b3bfecda6e6b027b8f608a1e07de49c9b6dcec2ab not found: ID does not exist" containerID="132ad161cd994757bb762a5b3bfecda6e6b027b8f608a1e07de49c9b6dcec2ab" Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.284960 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"132ad161cd994757bb762a5b3bfecda6e6b027b8f608a1e07de49c9b6dcec2ab"} err="failed to get container status \"132ad161cd994757bb762a5b3bfecda6e6b027b8f608a1e07de49c9b6dcec2ab\": rpc error: code = NotFound desc = could not find container \"132ad161cd994757bb762a5b3bfecda6e6b027b8f608a1e07de49c9b6dcec2ab\": container with ID starting with 132ad161cd994757bb762a5b3bfecda6e6b027b8f608a1e07de49c9b6dcec2ab not found: ID does not exist" Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.284976 4881 scope.go:117] "RemoveContainer" 
containerID="12c39bc2d529f43a5a3f3fef6ed96b693108660ce43949985daf2026a57511eb" Dec 11 08:29:42 crc kubenswrapper[4881]: E1211 08:29:42.285271 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12c39bc2d529f43a5a3f3fef6ed96b693108660ce43949985daf2026a57511eb\": container with ID starting with 12c39bc2d529f43a5a3f3fef6ed96b693108660ce43949985daf2026a57511eb not found: ID does not exist" containerID="12c39bc2d529f43a5a3f3fef6ed96b693108660ce43949985daf2026a57511eb" Dec 11 08:29:42 crc kubenswrapper[4881]: I1211 08:29:42.285293 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12c39bc2d529f43a5a3f3fef6ed96b693108660ce43949985daf2026a57511eb"} err="failed to get container status \"12c39bc2d529f43a5a3f3fef6ed96b693108660ce43949985daf2026a57511eb\": rpc error: code = NotFound desc = could not find container \"12c39bc2d529f43a5a3f3fef6ed96b693108660ce43949985daf2026a57511eb\": container with ID starting with 12c39bc2d529f43a5a3f3fef6ed96b693108660ce43949985daf2026a57511eb not found: ID does not exist" Dec 11 08:29:43 crc kubenswrapper[4881]: I1211 08:29:43.021771 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1" path="/var/lib/kubelet/pods/c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1/volumes" Dec 11 08:29:47 crc kubenswrapper[4881]: I1211 08:29:47.545298 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4vrs4"] Dec 11 08:29:47 crc kubenswrapper[4881]: E1211 08:29:47.545961 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1" containerName="registry-server" Dec 11 08:29:47 crc kubenswrapper[4881]: I1211 08:29:47.545996 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1" containerName="registry-server" Dec 11 08:29:47 crc kubenswrapper[4881]: E1211 08:29:47.546025 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1" containerName="extract-content" Dec 11 08:29:47 crc kubenswrapper[4881]: I1211 08:29:47.546033 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1" containerName="extract-content" Dec 11 08:29:47 crc kubenswrapper[4881]: E1211 08:29:47.546046 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1" containerName="extract-utilities" Dec 11 08:29:47 crc kubenswrapper[4881]: I1211 08:29:47.546054 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1" containerName="extract-utilities" Dec 11 08:29:47 crc kubenswrapper[4881]: I1211 08:29:47.546215 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="c98e1d84-a1bb-4fee-96cf-48ae43d0ffa1" containerName="registry-server" Dec 11 08:29:47 crc kubenswrapper[4881]: I1211 08:29:47.547492 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4vrs4" Dec 11 08:29:47 crc kubenswrapper[4881]: I1211 08:29:47.561969 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4vrs4"] Dec 11 08:29:47 crc kubenswrapper[4881]: I1211 08:29:47.606534 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mpjc\" (UniqueName: \"kubernetes.io/projected/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-kube-api-access-5mpjc\") pod \"redhat-marketplace-4vrs4\" (UID: \"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0\") " pod="openshift-marketplace/redhat-marketplace-4vrs4" Dec 11 08:29:47 crc kubenswrapper[4881]: I1211 08:29:47.606720 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-catalog-content\") pod \"redhat-marketplace-4vrs4\" (UID: \"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0\") " pod="openshift-marketplace/redhat-marketplace-4vrs4" Dec 11 08:29:47 crc kubenswrapper[4881]: I1211 08:29:47.606807 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-utilities\") pod \"redhat-marketplace-4vrs4\" (UID: \"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0\") " pod="openshift-marketplace/redhat-marketplace-4vrs4" Dec 11 08:29:47 crc kubenswrapper[4881]: I1211 08:29:47.708174 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-utilities\") pod \"redhat-marketplace-4vrs4\" (UID: \"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0\") " pod="openshift-marketplace/redhat-marketplace-4vrs4" Dec 11 08:29:47 crc kubenswrapper[4881]: I1211 08:29:47.708299 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mpjc\" (UniqueName: \"kubernetes.io/projected/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-kube-api-access-5mpjc\") pod \"redhat-marketplace-4vrs4\" (UID: \"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0\") " pod="openshift-marketplace/redhat-marketplace-4vrs4" Dec 11 08:29:47 crc kubenswrapper[4881]: I1211 08:29:47.708401 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-catalog-content\") pod \"redhat-marketplace-4vrs4\" (UID: \"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0\") " pod="openshift-marketplace/redhat-marketplace-4vrs4" Dec 11 08:29:47 crc kubenswrapper[4881]: I1211 08:29:47.708963 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-catalog-content\") pod \"redhat-marketplace-4vrs4\" (UID: \"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0\") " pod="openshift-marketplace/redhat-marketplace-4vrs4" Dec 11 08:29:47 crc kubenswrapper[4881]: I1211 08:29:47.709066 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-utilities\") pod \"redhat-marketplace-4vrs4\" (UID: \"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0\") " pod="openshift-marketplace/redhat-marketplace-4vrs4" Dec 11 08:29:47 crc kubenswrapper[4881]: I1211 08:29:47.728590 4881 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-5mpjc\" (UniqueName: \"kubernetes.io/projected/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-kube-api-access-5mpjc\") pod \"redhat-marketplace-4vrs4\" (UID: \"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0\") " pod="openshift-marketplace/redhat-marketplace-4vrs4" Dec 11 08:29:47 crc kubenswrapper[4881]: I1211 08:29:47.866631 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4vrs4" Dec 11 08:29:48 crc kubenswrapper[4881]: I1211 08:29:48.165143 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4vrs4"] Dec 11 08:29:48 crc kubenswrapper[4881]: I1211 08:29:48.251561 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4vrs4" event={"ID":"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0","Type":"ContainerStarted","Data":"7331d516f7b700f323c060758c09e12f8c4ae7ce1f9e073dc8a66f288ec1aaa9"} Dec 11 08:29:49 crc kubenswrapper[4881]: I1211 08:29:49.261922 4881 generic.go:334] "Generic (PLEG): container finished" podID="75deeb3c-ba8a-44f9-9437-3d5c2e683dc0" containerID="f6b4ebfcd2331f09b00a693b8cbf3c2191e3ebde8bf0caa1fb62d3688c5aeb50" exitCode=0 Dec 11 08:29:49 crc kubenswrapper[4881]: I1211 08:29:49.262016 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4vrs4" event={"ID":"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0","Type":"ContainerDied","Data":"f6b4ebfcd2331f09b00a693b8cbf3c2191e3ebde8bf0caa1fb62d3688c5aeb50"} Dec 11 08:29:51 crc kubenswrapper[4881]: I1211 08:29:51.041906 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-ingester-0" Dec 11 08:29:51 crc kubenswrapper[4881]: I1211 08:29:51.288631 4881 generic.go:334] "Generic (PLEG): container finished" podID="75deeb3c-ba8a-44f9-9437-3d5c2e683dc0" containerID="952ad555dcd8f1013115ed8d866919a115306d7dc1252880da1ddfc39664bc8e" exitCode=0 Dec 11 08:29:51 crc kubenswrapper[4881]: I1211 08:29:51.288672 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4vrs4" event={"ID":"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0","Type":"ContainerDied","Data":"952ad555dcd8f1013115ed8d866919a115306d7dc1252880da1ddfc39664bc8e"} Dec 11 08:29:52 crc kubenswrapper[4881]: I1211 08:29:52.301759 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4vrs4" event={"ID":"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0","Type":"ContainerStarted","Data":"50649d287178cd07ff2512a7905b8893344a58751cc02f89a912cfe7d556770e"} Dec 11 08:29:52 crc kubenswrapper[4881]: I1211 08:29:52.324780 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4vrs4" podStartSLOduration=2.8070903879999998 podStartE2EDuration="5.324741183s" podCreationTimestamp="2025-12-11 08:29:47 +0000 UTC" firstStartedPulling="2025-12-11 08:29:49.264643267 +0000 UTC m=+837.642011974" lastFinishedPulling="2025-12-11 08:29:51.782294062 +0000 UTC m=+840.159662769" observedRunningTime="2025-12-11 08:29:52.324685841 +0000 UTC m=+840.702054548" watchObservedRunningTime="2025-12-11 08:29:52.324741183 +0000 UTC m=+840.702109940" Dec 11 08:29:57 crc kubenswrapper[4881]: I1211 08:29:57.866912 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4vrs4" Dec 11 08:29:57 crc kubenswrapper[4881]: I1211 08:29:57.867365 4881 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4vrs4" Dec 11 08:29:57 crc kubenswrapper[4881]: I1211 08:29:57.929437 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4vrs4" Dec 11 08:29:58 crc kubenswrapper[4881]: I1211 08:29:58.409248 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4vrs4" Dec 11 08:29:59 crc kubenswrapper[4881]: I1211 08:29:59.172204 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4vrs4"] Dec 11 08:29:59 crc kubenswrapper[4881]: I1211 08:29:59.397323 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:29:59 crc kubenswrapper[4881]: I1211 08:29:59.397724 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:29:59 crc kubenswrapper[4881]: I1211 08:29:59.397779 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:29:59 crc kubenswrapper[4881]: I1211 08:29:59.398508 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"77814b1ae10c46f437c76c46bb8c5b9eb3fd105add11c725ab5d0afd4052b630"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 08:29:59 crc kubenswrapper[4881]: I1211 08:29:59.398597 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://77814b1ae10c46f437c76c46bb8c5b9eb3fd105add11c725ab5d0afd4052b630" gracePeriod=600 Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.157322 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr"] Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.158660 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr" Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.161724 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.161736 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.172065 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr"] Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.338628 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e4cf927d-1b71-4715-9161-bed7cb184f26-config-volume\") pod \"collect-profiles-29424030-hkdpr\" (UID: \"e4cf927d-1b71-4715-9161-bed7cb184f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr" Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.338762 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdgtm\" (UniqueName: \"kubernetes.io/projected/e4cf927d-1b71-4715-9161-bed7cb184f26-kube-api-access-jdgtm\") pod \"collect-profiles-29424030-hkdpr\" (UID: \"e4cf927d-1b71-4715-9161-bed7cb184f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr" Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.338807 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e4cf927d-1b71-4715-9161-bed7cb184f26-secret-volume\") pod \"collect-profiles-29424030-hkdpr\" (UID: \"e4cf927d-1b71-4715-9161-bed7cb184f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr" Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.371673 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="77814b1ae10c46f437c76c46bb8c5b9eb3fd105add11c725ab5d0afd4052b630" exitCode=0 Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.371874 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4vrs4" podUID="75deeb3c-ba8a-44f9-9437-3d5c2e683dc0" containerName="registry-server" containerID="cri-o://50649d287178cd07ff2512a7905b8893344a58751cc02f89a912cfe7d556770e" gracePeriod=2 Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.371943 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"77814b1ae10c46f437c76c46bb8c5b9eb3fd105add11c725ab5d0afd4052b630"} Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.371966 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"46ea07e17db7258f87fdf87e503b03bd1b18ffe826127eac80cec982af4ad0c0"} Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.371983 4881 scope.go:117] "RemoveContainer" containerID="2378020365a7ffc5afa00424ace5b73c56a13d69da3d6d17d8336f551688833d" Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 
08:30:00.441056 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdgtm\" (UniqueName: \"kubernetes.io/projected/e4cf927d-1b71-4715-9161-bed7cb184f26-kube-api-access-jdgtm\") pod \"collect-profiles-29424030-hkdpr\" (UID: \"e4cf927d-1b71-4715-9161-bed7cb184f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr" Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.441202 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e4cf927d-1b71-4715-9161-bed7cb184f26-secret-volume\") pod \"collect-profiles-29424030-hkdpr\" (UID: \"e4cf927d-1b71-4715-9161-bed7cb184f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr" Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.442738 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e4cf927d-1b71-4715-9161-bed7cb184f26-config-volume\") pod \"collect-profiles-29424030-hkdpr\" (UID: \"e4cf927d-1b71-4715-9161-bed7cb184f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr" Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.443920 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e4cf927d-1b71-4715-9161-bed7cb184f26-config-volume\") pod \"collect-profiles-29424030-hkdpr\" (UID: \"e4cf927d-1b71-4715-9161-bed7cb184f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr" Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.447306 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e4cf927d-1b71-4715-9161-bed7cb184f26-secret-volume\") pod \"collect-profiles-29424030-hkdpr\" (UID: \"e4cf927d-1b71-4715-9161-bed7cb184f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr" Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.465548 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdgtm\" (UniqueName: \"kubernetes.io/projected/e4cf927d-1b71-4715-9161-bed7cb184f26-kube-api-access-jdgtm\") pod \"collect-profiles-29424030-hkdpr\" (UID: \"e4cf927d-1b71-4715-9161-bed7cb184f26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr" Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.482181 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr" Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.866101 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4vrs4" Dec 11 08:30:00 crc kubenswrapper[4881]: I1211 08:30:00.992972 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr"] Dec 11 08:30:00 crc kubenswrapper[4881]: W1211 08:30:00.996695 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode4cf927d_1b71_4715_9161_bed7cb184f26.slice/crio-ffc6910062a5128dc3fea12023628041d29678dc5441894c66ea1788f11a450b WatchSource:0}: Error finding container ffc6910062a5128dc3fea12023628041d29678dc5441894c66ea1788f11a450b: Status 404 returned error can't find the container with id ffc6910062a5128dc3fea12023628041d29678dc5441894c66ea1788f11a450b Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.051396 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-catalog-content\") pod \"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0\" (UID: \"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0\") " Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.051731 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5mpjc\" (UniqueName: \"kubernetes.io/projected/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-kube-api-access-5mpjc\") pod \"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0\" (UID: \"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0\") " Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.051890 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-utilities\") pod \"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0\" (UID: \"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0\") " Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.052690 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-utilities" (OuterVolumeSpecName: "utilities") pod "75deeb3c-ba8a-44f9-9437-3d5c2e683dc0" (UID: "75deeb3c-ba8a-44f9-9437-3d5c2e683dc0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.057514 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-kube-api-access-5mpjc" (OuterVolumeSpecName: "kube-api-access-5mpjc") pod "75deeb3c-ba8a-44f9-9437-3d5c2e683dc0" (UID: "75deeb3c-ba8a-44f9-9437-3d5c2e683dc0"). InnerVolumeSpecName "kube-api-access-5mpjc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.072654 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "75deeb3c-ba8a-44f9-9437-3d5c2e683dc0" (UID: "75deeb3c-ba8a-44f9-9437-3d5c2e683dc0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.154376 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.154511 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.154589 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5mpjc\" (UniqueName: \"kubernetes.io/projected/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0-kube-api-access-5mpjc\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.380596 4881 generic.go:334] "Generic (PLEG): container finished" podID="e4cf927d-1b71-4715-9161-bed7cb184f26" containerID="984a7e4b52ad1536d0f9e36ebb4356f86ee31dfb21c39ed2b083d921637aefe2" exitCode=0 Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.380678 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr" event={"ID":"e4cf927d-1b71-4715-9161-bed7cb184f26","Type":"ContainerDied","Data":"984a7e4b52ad1536d0f9e36ebb4356f86ee31dfb21c39ed2b083d921637aefe2"} Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.380703 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr" event={"ID":"e4cf927d-1b71-4715-9161-bed7cb184f26","Type":"ContainerStarted","Data":"ffc6910062a5128dc3fea12023628041d29678dc5441894c66ea1788f11a450b"} Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.383284 4881 generic.go:334] "Generic (PLEG): container finished" podID="75deeb3c-ba8a-44f9-9437-3d5c2e683dc0" containerID="50649d287178cd07ff2512a7905b8893344a58751cc02f89a912cfe7d556770e" exitCode=0 Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.383427 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4vrs4" Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.383365 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4vrs4" event={"ID":"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0","Type":"ContainerDied","Data":"50649d287178cd07ff2512a7905b8893344a58751cc02f89a912cfe7d556770e"} Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.383588 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4vrs4" event={"ID":"75deeb3c-ba8a-44f9-9437-3d5c2e683dc0","Type":"ContainerDied","Data":"7331d516f7b700f323c060758c09e12f8c4ae7ce1f9e073dc8a66f288ec1aaa9"} Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.383607 4881 scope.go:117] "RemoveContainer" containerID="50649d287178cd07ff2512a7905b8893344a58751cc02f89a912cfe7d556770e" Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.404635 4881 scope.go:117] "RemoveContainer" containerID="952ad555dcd8f1013115ed8d866919a115306d7dc1252880da1ddfc39664bc8e" Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.430494 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4vrs4"] Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.431378 4881 scope.go:117] "RemoveContainer" containerID="f6b4ebfcd2331f09b00a693b8cbf3c2191e3ebde8bf0caa1fb62d3688c5aeb50" Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.434564 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4vrs4"] Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.446017 4881 scope.go:117] "RemoveContainer" containerID="50649d287178cd07ff2512a7905b8893344a58751cc02f89a912cfe7d556770e" Dec 11 08:30:01 crc kubenswrapper[4881]: E1211 08:30:01.446521 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50649d287178cd07ff2512a7905b8893344a58751cc02f89a912cfe7d556770e\": container with ID starting with 50649d287178cd07ff2512a7905b8893344a58751cc02f89a912cfe7d556770e not found: ID does not exist" containerID="50649d287178cd07ff2512a7905b8893344a58751cc02f89a912cfe7d556770e" Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.446618 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50649d287178cd07ff2512a7905b8893344a58751cc02f89a912cfe7d556770e"} err="failed to get container status \"50649d287178cd07ff2512a7905b8893344a58751cc02f89a912cfe7d556770e\": rpc error: code = NotFound desc = could not find container \"50649d287178cd07ff2512a7905b8893344a58751cc02f89a912cfe7d556770e\": container with ID starting with 50649d287178cd07ff2512a7905b8893344a58751cc02f89a912cfe7d556770e not found: ID does not exist" Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.446695 4881 scope.go:117] "RemoveContainer" containerID="952ad555dcd8f1013115ed8d866919a115306d7dc1252880da1ddfc39664bc8e" Dec 11 08:30:01 crc kubenswrapper[4881]: E1211 08:30:01.447297 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"952ad555dcd8f1013115ed8d866919a115306d7dc1252880da1ddfc39664bc8e\": container with ID starting with 952ad555dcd8f1013115ed8d866919a115306d7dc1252880da1ddfc39664bc8e not found: ID does not exist" containerID="952ad555dcd8f1013115ed8d866919a115306d7dc1252880da1ddfc39664bc8e" Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.447403 4881 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"952ad555dcd8f1013115ed8d866919a115306d7dc1252880da1ddfc39664bc8e"} err="failed to get container status \"952ad555dcd8f1013115ed8d866919a115306d7dc1252880da1ddfc39664bc8e\": rpc error: code = NotFound desc = could not find container \"952ad555dcd8f1013115ed8d866919a115306d7dc1252880da1ddfc39664bc8e\": container with ID starting with 952ad555dcd8f1013115ed8d866919a115306d7dc1252880da1ddfc39664bc8e not found: ID does not exist" Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.447474 4881 scope.go:117] "RemoveContainer" containerID="f6b4ebfcd2331f09b00a693b8cbf3c2191e3ebde8bf0caa1fb62d3688c5aeb50" Dec 11 08:30:01 crc kubenswrapper[4881]: E1211 08:30:01.447723 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6b4ebfcd2331f09b00a693b8cbf3c2191e3ebde8bf0caa1fb62d3688c5aeb50\": container with ID starting with f6b4ebfcd2331f09b00a693b8cbf3c2191e3ebde8bf0caa1fb62d3688c5aeb50 not found: ID does not exist" containerID="f6b4ebfcd2331f09b00a693b8cbf3c2191e3ebde8bf0caa1fb62d3688c5aeb50" Dec 11 08:30:01 crc kubenswrapper[4881]: I1211 08:30:01.447817 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6b4ebfcd2331f09b00a693b8cbf3c2191e3ebde8bf0caa1fb62d3688c5aeb50"} err="failed to get container status \"f6b4ebfcd2331f09b00a693b8cbf3c2191e3ebde8bf0caa1fb62d3688c5aeb50\": rpc error: code = NotFound desc = could not find container \"f6b4ebfcd2331f09b00a693b8cbf3c2191e3ebde8bf0caa1fb62d3688c5aeb50\": container with ID starting with f6b4ebfcd2331f09b00a693b8cbf3c2191e3ebde8bf0caa1fb62d3688c5aeb50 not found: ID does not exist" Dec 11 08:30:02 crc kubenswrapper[4881]: I1211 08:30:02.676784 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr" Dec 11 08:30:02 crc kubenswrapper[4881]: I1211 08:30:02.775980 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e4cf927d-1b71-4715-9161-bed7cb184f26-secret-volume\") pod \"e4cf927d-1b71-4715-9161-bed7cb184f26\" (UID: \"e4cf927d-1b71-4715-9161-bed7cb184f26\") " Dec 11 08:30:02 crc kubenswrapper[4881]: I1211 08:30:02.776145 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e4cf927d-1b71-4715-9161-bed7cb184f26-config-volume\") pod \"e4cf927d-1b71-4715-9161-bed7cb184f26\" (UID: \"e4cf927d-1b71-4715-9161-bed7cb184f26\") " Dec 11 08:30:02 crc kubenswrapper[4881]: I1211 08:30:02.776214 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdgtm\" (UniqueName: \"kubernetes.io/projected/e4cf927d-1b71-4715-9161-bed7cb184f26-kube-api-access-jdgtm\") pod \"e4cf927d-1b71-4715-9161-bed7cb184f26\" (UID: \"e4cf927d-1b71-4715-9161-bed7cb184f26\") " Dec 11 08:30:02 crc kubenswrapper[4881]: I1211 08:30:02.777078 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4cf927d-1b71-4715-9161-bed7cb184f26-config-volume" (OuterVolumeSpecName: "config-volume") pod "e4cf927d-1b71-4715-9161-bed7cb184f26" (UID: "e4cf927d-1b71-4715-9161-bed7cb184f26"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:30:02 crc kubenswrapper[4881]: I1211 08:30:02.782491 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4cf927d-1b71-4715-9161-bed7cb184f26-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e4cf927d-1b71-4715-9161-bed7cb184f26" (UID: "e4cf927d-1b71-4715-9161-bed7cb184f26"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:30:02 crc kubenswrapper[4881]: I1211 08:30:02.782604 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4cf927d-1b71-4715-9161-bed7cb184f26-kube-api-access-jdgtm" (OuterVolumeSpecName: "kube-api-access-jdgtm") pod "e4cf927d-1b71-4715-9161-bed7cb184f26" (UID: "e4cf927d-1b71-4715-9161-bed7cb184f26"). InnerVolumeSpecName "kube-api-access-jdgtm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:30:02 crc kubenswrapper[4881]: I1211 08:30:02.877796 4881 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e4cf927d-1b71-4715-9161-bed7cb184f26-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:02 crc kubenswrapper[4881]: I1211 08:30:02.877832 4881 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e4cf927d-1b71-4715-9161-bed7cb184f26-config-volume\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:02 crc kubenswrapper[4881]: I1211 08:30:02.877843 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdgtm\" (UniqueName: \"kubernetes.io/projected/e4cf927d-1b71-4715-9161-bed7cb184f26-kube-api-access-jdgtm\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:03 crc kubenswrapper[4881]: I1211 08:30:03.020284 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75deeb3c-ba8a-44f9-9437-3d5c2e683dc0" path="/var/lib/kubelet/pods/75deeb3c-ba8a-44f9-9437-3d5c2e683dc0/volumes" Dec 11 08:30:03 crc kubenswrapper[4881]: I1211 08:30:03.402726 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr" event={"ID":"e4cf927d-1b71-4715-9161-bed7cb184f26","Type":"ContainerDied","Data":"ffc6910062a5128dc3fea12023628041d29678dc5441894c66ea1788f11a450b"} Dec 11 08:30:03 crc kubenswrapper[4881]: I1211 08:30:03.403024 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ffc6910062a5128dc3fea12023628041d29678dc5441894c66ea1788f11a450b" Dec 11 08:30:03 crc kubenswrapper[4881]: I1211 08:30:03.402870 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.561025 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-zfsc9"] Dec 11 08:30:10 crc kubenswrapper[4881]: E1211 08:30:10.562391 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75deeb3c-ba8a-44f9-9437-3d5c2e683dc0" containerName="extract-utilities" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.562423 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="75deeb3c-ba8a-44f9-9437-3d5c2e683dc0" containerName="extract-utilities" Dec 11 08:30:10 crc kubenswrapper[4881]: E1211 08:30:10.562436 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75deeb3c-ba8a-44f9-9437-3d5c2e683dc0" containerName="extract-content" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.562445 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="75deeb3c-ba8a-44f9-9437-3d5c2e683dc0" containerName="extract-content" Dec 11 08:30:10 crc kubenswrapper[4881]: E1211 08:30:10.562462 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4cf927d-1b71-4715-9161-bed7cb184f26" containerName="collect-profiles" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.562471 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4cf927d-1b71-4715-9161-bed7cb184f26" containerName="collect-profiles" Dec 11 08:30:10 crc kubenswrapper[4881]: E1211 08:30:10.562491 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75deeb3c-ba8a-44f9-9437-3d5c2e683dc0" containerName="registry-server" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.562498 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="75deeb3c-ba8a-44f9-9437-3d5c2e683dc0" containerName="registry-server" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.562824 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="75deeb3c-ba8a-44f9-9437-3d5c2e683dc0" containerName="registry-server" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.562839 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4cf927d-1b71-4715-9161-bed7cb184f26" containerName="collect-profiles" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.563794 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.566968 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-g5mvn" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.567505 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.567702 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.568976 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.569171 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.579611 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-zfsc9"] Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.590354 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.607089 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-zfsc9"] Dec 11 08:30:10 crc kubenswrapper[4881]: E1211 08:30:10.609705 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[collector-syslog-receiver collector-token config config-openshift-service-cacrt datadir entrypoint kube-api-access-khslc metrics sa-token tmp trusted-ca], unattached volumes=[], failed to process volumes=[collector-syslog-receiver collector-token config config-openshift-service-cacrt datadir entrypoint kube-api-access-khslc metrics sa-token tmp trusted-ca]: context canceled" pod="openshift-logging/collector-zfsc9" podUID="1f1b9df4-9c3c-406a-863a-fa8eee41b4e5" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.612525 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-collector-token\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.612632 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-sa-token\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.612688 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-collector-syslog-receiver\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.612716 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-metrics\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " 
pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.612748 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-datadir\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.612770 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khslc\" (UniqueName: \"kubernetes.io/projected/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-kube-api-access-khslc\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.612815 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-config\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.612908 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-config-openshift-service-cacrt\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.612926 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-trusted-ca\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.613002 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-tmp\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.613040 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-entrypoint\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.715219 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-tmp\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.715734 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-entrypoint\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.715782 4881 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-collector-token\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.715811 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-sa-token\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.715861 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-collector-syslog-receiver\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.715899 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-metrics\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.715945 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-datadir\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.715974 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khslc\" (UniqueName: \"kubernetes.io/projected/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-kube-api-access-khslc\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.715991 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-config\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.716019 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-config-openshift-service-cacrt\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.716037 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-trusted-ca\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: E1211 08:30:10.716372 4881 secret.go:188] Couldn't get secret openshift-logging/collector-metrics: secret "collector-metrics" not found Dec 11 08:30:10 crc kubenswrapper[4881]: E1211 08:30:10.716517 4881 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-metrics podName:1f1b9df4-9c3c-406a-863a-fa8eee41b4e5 nodeName:}" failed. No retries permitted until 2025-12-11 08:30:11.21643264 +0000 UTC m=+859.593801337 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics" (UniqueName: "kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-metrics") pod "collector-zfsc9" (UID: "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5") : secret "collector-metrics" not found Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.716705 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-datadir\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.717476 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-config\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.717938 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-config-openshift-service-cacrt\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.719699 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-entrypoint\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.721959 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-trusted-ca\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.733741 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-tmp\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.734216 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-collector-syslog-receiver\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.734244 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-collector-token\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.737715 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khslc\" 
(UniqueName: \"kubernetes.io/projected/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-kube-api-access-khslc\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:10 crc kubenswrapper[4881]: I1211 08:30:10.741711 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-sa-token\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.222237 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-metrics\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.225665 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-metrics\") pod \"collector-zfsc9\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " pod="openshift-logging/collector-zfsc9" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.465469 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-zfsc9" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.477901 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-zfsc9" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.629053 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-datadir\") pod \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.629149 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-trusted-ca\") pod \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.629191 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-datadir" (OuterVolumeSpecName: "datadir") pod "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5" (UID: "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5"). InnerVolumeSpecName "datadir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.629250 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-metrics\") pod \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.629302 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-khslc\" (UniqueName: \"kubernetes.io/projected/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-kube-api-access-khslc\") pod \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.629396 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-entrypoint\") pod \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.629529 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-config-openshift-service-cacrt\") pod \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.629606 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-collector-token\") pod \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.629704 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-config\") pod \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.629759 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-collector-syslog-receiver\") pod \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.629820 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-sa-token\") pod \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.629889 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-tmp\") pod \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\" (UID: \"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5\") " Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.630617 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-entrypoint" (OuterVolumeSpecName: "entrypoint") pod "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5" (UID: 
"1f1b9df4-9c3c-406a-863a-fa8eee41b4e5"). InnerVolumeSpecName "entrypoint". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.630668 4881 reconciler_common.go:293] "Volume detached for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-datadir\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.630703 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-config-openshift-service-cacrt" (OuterVolumeSpecName: "config-openshift-service-cacrt") pod "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5" (UID: "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5"). InnerVolumeSpecName "config-openshift-service-cacrt". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.631172 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-config" (OuterVolumeSpecName: "config") pod "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5" (UID: "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.631275 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5" (UID: "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.637547 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-kube-api-access-khslc" (OuterVolumeSpecName: "kube-api-access-khslc") pod "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5" (UID: "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5"). InnerVolumeSpecName "kube-api-access-khslc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.637709 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-collector-token" (OuterVolumeSpecName: "collector-token") pod "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5" (UID: "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5"). InnerVolumeSpecName "collector-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.637713 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-sa-token" (OuterVolumeSpecName: "sa-token") pod "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5" (UID: "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5"). InnerVolumeSpecName "sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.638101 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-tmp" (OuterVolumeSpecName: "tmp") pod "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5" (UID: "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5"). InnerVolumeSpecName "tmp". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.638568 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-collector-syslog-receiver" (OuterVolumeSpecName: "collector-syslog-receiver") pod "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5" (UID: "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5"). InnerVolumeSpecName "collector-syslog-receiver". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.644194 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-metrics" (OuterVolumeSpecName: "metrics") pod "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5" (UID: "1f1b9df4-9c3c-406a-863a-fa8eee41b4e5"). InnerVolumeSpecName "metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.732013 4881 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.732056 4881 reconciler_common.go:293] "Volume detached for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-metrics\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.732070 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-khslc\" (UniqueName: \"kubernetes.io/projected/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-kube-api-access-khslc\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.732084 4881 reconciler_common.go:293] "Volume detached for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-entrypoint\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.732098 4881 reconciler_common.go:293] "Volume detached for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-config-openshift-service-cacrt\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.732110 4881 reconciler_common.go:293] "Volume detached for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-collector-token\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.732122 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.732133 4881 reconciler_common.go:293] "Volume detached for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-collector-syslog-receiver\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.732144 4881 reconciler_common.go:293] "Volume detached for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-sa-token\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:11 crc kubenswrapper[4881]: I1211 08:30:11.732155 4881 reconciler_common.go:293] "Volume detached for volume \"tmp\" (UniqueName: 
\"kubernetes.io/empty-dir/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5-tmp\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.472576 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-zfsc9" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.549572 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-zfsc9"] Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.557699 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-logging/collector-zfsc9"] Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.562741 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-bwkfh"] Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.563901 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.566407 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.566973 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.567087 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.567263 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-g5mvn" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.567380 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.571073 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-bwkfh"] Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.572656 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.749690 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/c095e07a-478b-41db-8ca3-a6b29c79756c-collector-token\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.751058 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/c095e07a-478b-41db-8ca3-a6b29c79756c-tmp\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.751561 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/c095e07a-478b-41db-8ca3-a6b29c79756c-metrics\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.751699 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/c095e07a-478b-41db-8ca3-a6b29c79756c-entrypoint\") pod 
\"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.751808 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c095e07a-478b-41db-8ca3-a6b29c79756c-config\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.752025 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fls29\" (UniqueName: \"kubernetes.io/projected/c095e07a-478b-41db-8ca3-a6b29c79756c-kube-api-access-fls29\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.752154 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/c095e07a-478b-41db-8ca3-a6b29c79756c-datadir\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.752281 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/c095e07a-478b-41db-8ca3-a6b29c79756c-sa-token\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.752530 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/c095e07a-478b-41db-8ca3-a6b29c79756c-collector-syslog-receiver\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.752574 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/c095e07a-478b-41db-8ca3-a6b29c79756c-config-openshift-service-cacrt\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.752662 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c095e07a-478b-41db-8ca3-a6b29c79756c-trusted-ca\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.853749 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fls29\" (UniqueName: \"kubernetes.io/projected/c095e07a-478b-41db-8ca3-a6b29c79756c-kube-api-access-fls29\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.853816 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/c095e07a-478b-41db-8ca3-a6b29c79756c-datadir\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") 
" pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.853860 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/c095e07a-478b-41db-8ca3-a6b29c79756c-sa-token\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.853919 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/c095e07a-478b-41db-8ca3-a6b29c79756c-collector-syslog-receiver\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.853939 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/c095e07a-478b-41db-8ca3-a6b29c79756c-config-openshift-service-cacrt\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.853969 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c095e07a-478b-41db-8ca3-a6b29c79756c-trusted-ca\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.853991 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/c095e07a-478b-41db-8ca3-a6b29c79756c-collector-token\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.854007 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/c095e07a-478b-41db-8ca3-a6b29c79756c-tmp\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.854026 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/c095e07a-478b-41db-8ca3-a6b29c79756c-metrics\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.854043 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/c095e07a-478b-41db-8ca3-a6b29c79756c-entrypoint\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.854030 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/c095e07a-478b-41db-8ca3-a6b29c79756c-datadir\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.854062 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/c095e07a-478b-41db-8ca3-a6b29c79756c-config\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.855590 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/c095e07a-478b-41db-8ca3-a6b29c79756c-entrypoint\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.855599 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c095e07a-478b-41db-8ca3-a6b29c79756c-config\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.856495 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/c095e07a-478b-41db-8ca3-a6b29c79756c-config-openshift-service-cacrt\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.856800 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c095e07a-478b-41db-8ca3-a6b29c79756c-trusted-ca\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.858556 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/c095e07a-478b-41db-8ca3-a6b29c79756c-tmp\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.861452 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/c095e07a-478b-41db-8ca3-a6b29c79756c-collector-syslog-receiver\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.862768 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/c095e07a-478b-41db-8ca3-a6b29c79756c-collector-token\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.868274 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/c095e07a-478b-41db-8ca3-a6b29c79756c-metrics\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.869705 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fls29\" (UniqueName: \"kubernetes.io/projected/c095e07a-478b-41db-8ca3-a6b29c79756c-kube-api-access-fls29\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.872081 4881 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/c095e07a-478b-41db-8ca3-a6b29c79756c-sa-token\") pod \"collector-bwkfh\" (UID: \"c095e07a-478b-41db-8ca3-a6b29c79756c\") " pod="openshift-logging/collector-bwkfh" Dec 11 08:30:12 crc kubenswrapper[4881]: I1211 08:30:12.880595 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-bwkfh" Dec 11 08:30:13 crc kubenswrapper[4881]: I1211 08:30:13.017080 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f1b9df4-9c3c-406a-863a-fa8eee41b4e5" path="/var/lib/kubelet/pods/1f1b9df4-9c3c-406a-863a-fa8eee41b4e5/volumes" Dec 11 08:30:13 crc kubenswrapper[4881]: I1211 08:30:13.315459 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-bwkfh"] Dec 11 08:30:13 crc kubenswrapper[4881]: I1211 08:30:13.482724 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-bwkfh" event={"ID":"c095e07a-478b-41db-8ca3-a6b29c79756c","Type":"ContainerStarted","Data":"ac80c3946479a695e652b3cbe3a165512bf594d98f53f5dcddcfc9154b015c20"} Dec 11 08:30:21 crc kubenswrapper[4881]: I1211 08:30:21.541185 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-bwkfh" event={"ID":"c095e07a-478b-41db-8ca3-a6b29c79756c","Type":"ContainerStarted","Data":"02c3cdf84b3abf2b9ff22450446bf340a147c18ae22171b1836c4b3e27f484fb"} Dec 11 08:30:21 crc kubenswrapper[4881]: I1211 08:30:21.567313 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/collector-bwkfh" podStartSLOduration=1.847541694 podStartE2EDuration="9.567294743s" podCreationTimestamp="2025-12-11 08:30:12 +0000 UTC" firstStartedPulling="2025-12-11 08:30:13.326645524 +0000 UTC m=+861.704014231" lastFinishedPulling="2025-12-11 08:30:21.046398583 +0000 UTC m=+869.423767280" observedRunningTime="2025-12-11 08:30:21.5659793 +0000 UTC m=+869.943348007" watchObservedRunningTime="2025-12-11 08:30:21.567294743 +0000 UTC m=+869.944663450" Dec 11 08:30:35 crc kubenswrapper[4881]: I1211 08:30:35.972239 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jtkpv"] Dec 11 08:30:35 crc kubenswrapper[4881]: I1211 08:30:35.984127 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jtkpv" Dec 11 08:30:35 crc kubenswrapper[4881]: I1211 08:30:35.984815 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jtkpv"] Dec 11 08:30:36 crc kubenswrapper[4881]: I1211 08:30:36.144181 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/677c7915-14eb-4f19-adb1-6cc04c415161-utilities\") pod \"redhat-operators-jtkpv\" (UID: \"677c7915-14eb-4f19-adb1-6cc04c415161\") " pod="openshift-marketplace/redhat-operators-jtkpv" Dec 11 08:30:36 crc kubenswrapper[4881]: I1211 08:30:36.144261 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/677c7915-14eb-4f19-adb1-6cc04c415161-catalog-content\") pod \"redhat-operators-jtkpv\" (UID: \"677c7915-14eb-4f19-adb1-6cc04c415161\") " pod="openshift-marketplace/redhat-operators-jtkpv" Dec 11 08:30:36 crc kubenswrapper[4881]: I1211 08:30:36.145046 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2bq2\" (UniqueName: \"kubernetes.io/projected/677c7915-14eb-4f19-adb1-6cc04c415161-kube-api-access-d2bq2\") pod \"redhat-operators-jtkpv\" (UID: \"677c7915-14eb-4f19-adb1-6cc04c415161\") " pod="openshift-marketplace/redhat-operators-jtkpv" Dec 11 08:30:36 crc kubenswrapper[4881]: I1211 08:30:36.246610 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2bq2\" (UniqueName: \"kubernetes.io/projected/677c7915-14eb-4f19-adb1-6cc04c415161-kube-api-access-d2bq2\") pod \"redhat-operators-jtkpv\" (UID: \"677c7915-14eb-4f19-adb1-6cc04c415161\") " pod="openshift-marketplace/redhat-operators-jtkpv" Dec 11 08:30:36 crc kubenswrapper[4881]: I1211 08:30:36.246729 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/677c7915-14eb-4f19-adb1-6cc04c415161-utilities\") pod \"redhat-operators-jtkpv\" (UID: \"677c7915-14eb-4f19-adb1-6cc04c415161\") " pod="openshift-marketplace/redhat-operators-jtkpv" Dec 11 08:30:36 crc kubenswrapper[4881]: I1211 08:30:36.247198 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/677c7915-14eb-4f19-adb1-6cc04c415161-utilities\") pod \"redhat-operators-jtkpv\" (UID: \"677c7915-14eb-4f19-adb1-6cc04c415161\") " pod="openshift-marketplace/redhat-operators-jtkpv" Dec 11 08:30:36 crc kubenswrapper[4881]: I1211 08:30:36.246767 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/677c7915-14eb-4f19-adb1-6cc04c415161-catalog-content\") pod \"redhat-operators-jtkpv\" (UID: \"677c7915-14eb-4f19-adb1-6cc04c415161\") " pod="openshift-marketplace/redhat-operators-jtkpv" Dec 11 08:30:36 crc kubenswrapper[4881]: I1211 08:30:36.247745 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/677c7915-14eb-4f19-adb1-6cc04c415161-catalog-content\") pod \"redhat-operators-jtkpv\" (UID: \"677c7915-14eb-4f19-adb1-6cc04c415161\") " pod="openshift-marketplace/redhat-operators-jtkpv" Dec 11 08:30:36 crc kubenswrapper[4881]: I1211 08:30:36.272501 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-d2bq2\" (UniqueName: \"kubernetes.io/projected/677c7915-14eb-4f19-adb1-6cc04c415161-kube-api-access-d2bq2\") pod \"redhat-operators-jtkpv\" (UID: \"677c7915-14eb-4f19-adb1-6cc04c415161\") " pod="openshift-marketplace/redhat-operators-jtkpv" Dec 11 08:30:36 crc kubenswrapper[4881]: I1211 08:30:36.301831 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jtkpv" Dec 11 08:30:37 crc kubenswrapper[4881]: I1211 08:30:37.062346 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jtkpv"] Dec 11 08:30:37 crc kubenswrapper[4881]: W1211 08:30:37.066833 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod677c7915_14eb_4f19_adb1_6cc04c415161.slice/crio-a253fd7e36ca45d4886414875e6297d1f9ecdf215834fa86665093898eba7b8b WatchSource:0}: Error finding container a253fd7e36ca45d4886414875e6297d1f9ecdf215834fa86665093898eba7b8b: Status 404 returned error can't find the container with id a253fd7e36ca45d4886414875e6297d1f9ecdf215834fa86665093898eba7b8b Dec 11 08:30:37 crc kubenswrapper[4881]: I1211 08:30:37.700194 4881 generic.go:334] "Generic (PLEG): container finished" podID="677c7915-14eb-4f19-adb1-6cc04c415161" containerID="1243c09fd069888f91da306add24aaec13c8fc7d4cb3cf19c8def68f99406992" exitCode=0 Dec 11 08:30:37 crc kubenswrapper[4881]: I1211 08:30:37.700273 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtkpv" event={"ID":"677c7915-14eb-4f19-adb1-6cc04c415161","Type":"ContainerDied","Data":"1243c09fd069888f91da306add24aaec13c8fc7d4cb3cf19c8def68f99406992"} Dec 11 08:30:37 crc kubenswrapper[4881]: I1211 08:30:37.700495 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtkpv" event={"ID":"677c7915-14eb-4f19-adb1-6cc04c415161","Type":"ContainerStarted","Data":"a253fd7e36ca45d4886414875e6297d1f9ecdf215834fa86665093898eba7b8b"} Dec 11 08:30:38 crc kubenswrapper[4881]: I1211 08:30:38.708141 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtkpv" event={"ID":"677c7915-14eb-4f19-adb1-6cc04c415161","Type":"ContainerStarted","Data":"6847691d313273ac38428914d8d35d857a7e1aefb2273596e4848e51348efdb9"} Dec 11 08:30:40 crc kubenswrapper[4881]: I1211 08:30:40.799998 4881 generic.go:334] "Generic (PLEG): container finished" podID="677c7915-14eb-4f19-adb1-6cc04c415161" containerID="6847691d313273ac38428914d8d35d857a7e1aefb2273596e4848e51348efdb9" exitCode=0 Dec 11 08:30:40 crc kubenswrapper[4881]: I1211 08:30:40.800078 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtkpv" event={"ID":"677c7915-14eb-4f19-adb1-6cc04c415161","Type":"ContainerDied","Data":"6847691d313273ac38428914d8d35d857a7e1aefb2273596e4848e51348efdb9"} Dec 11 08:30:41 crc kubenswrapper[4881]: I1211 08:30:41.810509 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtkpv" event={"ID":"677c7915-14eb-4f19-adb1-6cc04c415161","Type":"ContainerStarted","Data":"276e84b5af858569a56f9e3db5279bbce95d7a4a93cc627c288079af479aede9"} Dec 11 08:30:41 crc kubenswrapper[4881]: I1211 08:30:41.834196 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jtkpv" podStartSLOduration=3.035749534 podStartE2EDuration="6.834172971s" 
podCreationTimestamp="2025-12-11 08:30:35 +0000 UTC" firstStartedPulling="2025-12-11 08:30:37.702205799 +0000 UTC m=+886.079574506" lastFinishedPulling="2025-12-11 08:30:41.500629246 +0000 UTC m=+889.877997943" observedRunningTime="2025-12-11 08:30:41.828952502 +0000 UTC m=+890.206321199" watchObservedRunningTime="2025-12-11 08:30:41.834172971 +0000 UTC m=+890.211541658" Dec 11 08:30:46 crc kubenswrapper[4881]: I1211 08:30:46.303077 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jtkpv" Dec 11 08:30:46 crc kubenswrapper[4881]: I1211 08:30:46.303732 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jtkpv" Dec 11 08:30:47 crc kubenswrapper[4881]: I1211 08:30:47.367734 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jtkpv" podUID="677c7915-14eb-4f19-adb1-6cc04c415161" containerName="registry-server" probeResult="failure" output=< Dec 11 08:30:47 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 08:30:47 crc kubenswrapper[4881]: > Dec 11 08:30:50 crc kubenswrapper[4881]: I1211 08:30:50.402566 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf"] Dec 11 08:30:50 crc kubenswrapper[4881]: I1211 08:30:50.404539 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" Dec 11 08:30:50 crc kubenswrapper[4881]: I1211 08:30:50.407376 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 11 08:30:50 crc kubenswrapper[4881]: I1211 08:30:50.434115 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf"] Dec 11 08:30:50 crc kubenswrapper[4881]: I1211 08:30:50.445281 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-bundle\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf\" (UID: \"6d84ce93-6f0f-4248-a431-dd1692ff2ba8\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" Dec 11 08:30:50 crc kubenswrapper[4881]: I1211 08:30:50.445418 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-util\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf\" (UID: \"6d84ce93-6f0f-4248-a431-dd1692ff2ba8\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" Dec 11 08:30:50 crc kubenswrapper[4881]: I1211 08:30:50.445485 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55zcd\" (UniqueName: \"kubernetes.io/projected/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-kube-api-access-55zcd\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf\" (UID: \"6d84ce93-6f0f-4248-a431-dd1692ff2ba8\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" Dec 11 08:30:50 crc kubenswrapper[4881]: I1211 08:30:50.546130 4881 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-55zcd\" (UniqueName: \"kubernetes.io/projected/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-kube-api-access-55zcd\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf\" (UID: \"6d84ce93-6f0f-4248-a431-dd1692ff2ba8\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" Dec 11 08:30:50 crc kubenswrapper[4881]: I1211 08:30:50.546218 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-bundle\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf\" (UID: \"6d84ce93-6f0f-4248-a431-dd1692ff2ba8\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" Dec 11 08:30:50 crc kubenswrapper[4881]: I1211 08:30:50.546287 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-util\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf\" (UID: \"6d84ce93-6f0f-4248-a431-dd1692ff2ba8\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" Dec 11 08:30:50 crc kubenswrapper[4881]: I1211 08:30:50.546818 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-util\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf\" (UID: \"6d84ce93-6f0f-4248-a431-dd1692ff2ba8\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" Dec 11 08:30:50 crc kubenswrapper[4881]: I1211 08:30:50.547104 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-bundle\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf\" (UID: \"6d84ce93-6f0f-4248-a431-dd1692ff2ba8\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" Dec 11 08:30:50 crc kubenswrapper[4881]: I1211 08:30:50.571296 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55zcd\" (UniqueName: \"kubernetes.io/projected/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-kube-api-access-55zcd\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf\" (UID: \"6d84ce93-6f0f-4248-a431-dd1692ff2ba8\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" Dec 11 08:30:50 crc kubenswrapper[4881]: I1211 08:30:50.732197 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" Dec 11 08:30:51 crc kubenswrapper[4881]: I1211 08:30:51.410226 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf"] Dec 11 08:30:51 crc kubenswrapper[4881]: W1211 08:30:51.434195 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d84ce93_6f0f_4248_a431_dd1692ff2ba8.slice/crio-ea6839fbedff8d9ef3dc52dbbad782d4e886830df55b4769140f551085339cee WatchSource:0}: Error finding container ea6839fbedff8d9ef3dc52dbbad782d4e886830df55b4769140f551085339cee: Status 404 returned error can't find the container with id ea6839fbedff8d9ef3dc52dbbad782d4e886830df55b4769140f551085339cee Dec 11 08:30:51 crc kubenswrapper[4881]: I1211 08:30:51.981973 4881 generic.go:334] "Generic (PLEG): container finished" podID="6d84ce93-6f0f-4248-a431-dd1692ff2ba8" containerID="d869d47663284c848f33e8d2ff970912b6d4a221d0554378e17d57917041012b" exitCode=0 Dec 11 08:30:51 crc kubenswrapper[4881]: I1211 08:30:51.982299 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" event={"ID":"6d84ce93-6f0f-4248-a431-dd1692ff2ba8","Type":"ContainerDied","Data":"d869d47663284c848f33e8d2ff970912b6d4a221d0554378e17d57917041012b"} Dec 11 08:30:51 crc kubenswrapper[4881]: I1211 08:30:51.982509 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" event={"ID":"6d84ce93-6f0f-4248-a431-dd1692ff2ba8","Type":"ContainerStarted","Data":"ea6839fbedff8d9ef3dc52dbbad782d4e886830df55b4769140f551085339cee"} Dec 11 08:30:55 crc kubenswrapper[4881]: I1211 08:30:55.017670 4881 generic.go:334] "Generic (PLEG): container finished" podID="6d84ce93-6f0f-4248-a431-dd1692ff2ba8" containerID="92dc47db95c5b5afccc6199b974e6579dae4b5a040630e647d0470eaba8e265f" exitCode=0 Dec 11 08:30:55 crc kubenswrapper[4881]: I1211 08:30:55.019951 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" event={"ID":"6d84ce93-6f0f-4248-a431-dd1692ff2ba8","Type":"ContainerDied","Data":"92dc47db95c5b5afccc6199b974e6579dae4b5a040630e647d0470eaba8e265f"} Dec 11 08:30:56 crc kubenswrapper[4881]: I1211 08:30:56.027450 4881 generic.go:334] "Generic (PLEG): container finished" podID="6d84ce93-6f0f-4248-a431-dd1692ff2ba8" containerID="53c8efb03ea97aba0d32799facda2d2fc47f20f8440563c8e9d990532420d4b7" exitCode=0 Dec 11 08:30:56 crc kubenswrapper[4881]: I1211 08:30:56.027550 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" event={"ID":"6d84ce93-6f0f-4248-a431-dd1692ff2ba8","Type":"ContainerDied","Data":"53c8efb03ea97aba0d32799facda2d2fc47f20f8440563c8e9d990532420d4b7"} Dec 11 08:30:56 crc kubenswrapper[4881]: I1211 08:30:56.352060 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jtkpv" Dec 11 08:30:56 crc kubenswrapper[4881]: I1211 08:30:56.399053 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jtkpv" Dec 11 08:30:56 crc kubenswrapper[4881]: I1211 08:30:56.748346 4881 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openshift-marketplace/redhat-operators-jtkpv"] Dec 11 08:30:57 crc kubenswrapper[4881]: I1211 08:30:57.358705 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" Dec 11 08:30:57 crc kubenswrapper[4881]: I1211 08:30:57.491673 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-bundle\") pod \"6d84ce93-6f0f-4248-a431-dd1692ff2ba8\" (UID: \"6d84ce93-6f0f-4248-a431-dd1692ff2ba8\") " Dec 11 08:30:57 crc kubenswrapper[4881]: I1211 08:30:57.492104 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-util\") pod \"6d84ce93-6f0f-4248-a431-dd1692ff2ba8\" (UID: \"6d84ce93-6f0f-4248-a431-dd1692ff2ba8\") " Dec 11 08:30:57 crc kubenswrapper[4881]: I1211 08:30:57.492272 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-55zcd\" (UniqueName: \"kubernetes.io/projected/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-kube-api-access-55zcd\") pod \"6d84ce93-6f0f-4248-a431-dd1692ff2ba8\" (UID: \"6d84ce93-6f0f-4248-a431-dd1692ff2ba8\") " Dec 11 08:30:57 crc kubenswrapper[4881]: I1211 08:30:57.492623 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-bundle" (OuterVolumeSpecName: "bundle") pod "6d84ce93-6f0f-4248-a431-dd1692ff2ba8" (UID: "6d84ce93-6f0f-4248-a431-dd1692ff2ba8"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:30:57 crc kubenswrapper[4881]: I1211 08:30:57.492915 4881 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:57 crc kubenswrapper[4881]: I1211 08:30:57.502602 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-util" (OuterVolumeSpecName: "util") pod "6d84ce93-6f0f-4248-a431-dd1692ff2ba8" (UID: "6d84ce93-6f0f-4248-a431-dd1692ff2ba8"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:30:57 crc kubenswrapper[4881]: I1211 08:30:57.502926 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-kube-api-access-55zcd" (OuterVolumeSpecName: "kube-api-access-55zcd") pod "6d84ce93-6f0f-4248-a431-dd1692ff2ba8" (UID: "6d84ce93-6f0f-4248-a431-dd1692ff2ba8"). InnerVolumeSpecName "kube-api-access-55zcd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:30:57 crc kubenswrapper[4881]: I1211 08:30:57.594467 4881 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-util\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:57 crc kubenswrapper[4881]: I1211 08:30:57.594533 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-55zcd\" (UniqueName: \"kubernetes.io/projected/6d84ce93-6f0f-4248-a431-dd1692ff2ba8-kube-api-access-55zcd\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:58 crc kubenswrapper[4881]: I1211 08:30:58.047180 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" event={"ID":"6d84ce93-6f0f-4248-a431-dd1692ff2ba8","Type":"ContainerDied","Data":"ea6839fbedff8d9ef3dc52dbbad782d4e886830df55b4769140f551085339cee"} Dec 11 08:30:58 crc kubenswrapper[4881]: I1211 08:30:58.047223 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ea6839fbedff8d9ef3dc52dbbad782d4e886830df55b4769140f551085339cee" Dec 11 08:30:58 crc kubenswrapper[4881]: I1211 08:30:58.047243 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf" Dec 11 08:30:58 crc kubenswrapper[4881]: I1211 08:30:58.047318 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jtkpv" podUID="677c7915-14eb-4f19-adb1-6cc04c415161" containerName="registry-server" containerID="cri-o://276e84b5af858569a56f9e3db5279bbce95d7a4a93cc627c288079af479aede9" gracePeriod=2 Dec 11 08:30:59 crc kubenswrapper[4881]: I1211 08:30:59.056926 4881 generic.go:334] "Generic (PLEG): container finished" podID="677c7915-14eb-4f19-adb1-6cc04c415161" containerID="276e84b5af858569a56f9e3db5279bbce95d7a4a93cc627c288079af479aede9" exitCode=0 Dec 11 08:30:59 crc kubenswrapper[4881]: I1211 08:30:59.056975 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtkpv" event={"ID":"677c7915-14eb-4f19-adb1-6cc04c415161","Type":"ContainerDied","Data":"276e84b5af858569a56f9e3db5279bbce95d7a4a93cc627c288079af479aede9"} Dec 11 08:30:59 crc kubenswrapper[4881]: I1211 08:30:59.619510 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jtkpv" Dec 11 08:30:59 crc kubenswrapper[4881]: I1211 08:30:59.730523 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2bq2\" (UniqueName: \"kubernetes.io/projected/677c7915-14eb-4f19-adb1-6cc04c415161-kube-api-access-d2bq2\") pod \"677c7915-14eb-4f19-adb1-6cc04c415161\" (UID: \"677c7915-14eb-4f19-adb1-6cc04c415161\") " Dec 11 08:30:59 crc kubenswrapper[4881]: I1211 08:30:59.730668 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/677c7915-14eb-4f19-adb1-6cc04c415161-catalog-content\") pod \"677c7915-14eb-4f19-adb1-6cc04c415161\" (UID: \"677c7915-14eb-4f19-adb1-6cc04c415161\") " Dec 11 08:30:59 crc kubenswrapper[4881]: I1211 08:30:59.730700 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/677c7915-14eb-4f19-adb1-6cc04c415161-utilities\") pod \"677c7915-14eb-4f19-adb1-6cc04c415161\" (UID: \"677c7915-14eb-4f19-adb1-6cc04c415161\") " Dec 11 08:30:59 crc kubenswrapper[4881]: I1211 08:30:59.731976 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/677c7915-14eb-4f19-adb1-6cc04c415161-utilities" (OuterVolumeSpecName: "utilities") pod "677c7915-14eb-4f19-adb1-6cc04c415161" (UID: "677c7915-14eb-4f19-adb1-6cc04c415161"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:30:59 crc kubenswrapper[4881]: I1211 08:30:59.736410 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/677c7915-14eb-4f19-adb1-6cc04c415161-kube-api-access-d2bq2" (OuterVolumeSpecName: "kube-api-access-d2bq2") pod "677c7915-14eb-4f19-adb1-6cc04c415161" (UID: "677c7915-14eb-4f19-adb1-6cc04c415161"). InnerVolumeSpecName "kube-api-access-d2bq2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:30:59 crc kubenswrapper[4881]: I1211 08:30:59.832689 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/677c7915-14eb-4f19-adb1-6cc04c415161-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:59 crc kubenswrapper[4881]: I1211 08:30:59.832736 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2bq2\" (UniqueName: \"kubernetes.io/projected/677c7915-14eb-4f19-adb1-6cc04c415161-kube-api-access-d2bq2\") on node \"crc\" DevicePath \"\"" Dec 11 08:30:59 crc kubenswrapper[4881]: I1211 08:30:59.846374 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/677c7915-14eb-4f19-adb1-6cc04c415161-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "677c7915-14eb-4f19-adb1-6cc04c415161" (UID: "677c7915-14eb-4f19-adb1-6cc04c415161"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:30:59 crc kubenswrapper[4881]: I1211 08:30:59.935094 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/677c7915-14eb-4f19-adb1-6cc04c415161-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:31:00 crc kubenswrapper[4881]: I1211 08:31:00.066950 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtkpv" event={"ID":"677c7915-14eb-4f19-adb1-6cc04c415161","Type":"ContainerDied","Data":"a253fd7e36ca45d4886414875e6297d1f9ecdf215834fa86665093898eba7b8b"} Dec 11 08:31:00 crc kubenswrapper[4881]: I1211 08:31:00.067008 4881 scope.go:117] "RemoveContainer" containerID="276e84b5af858569a56f9e3db5279bbce95d7a4a93cc627c288079af479aede9" Dec 11 08:31:00 crc kubenswrapper[4881]: I1211 08:31:00.068035 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jtkpv" Dec 11 08:31:00 crc kubenswrapper[4881]: I1211 08:31:00.085783 4881 scope.go:117] "RemoveContainer" containerID="6847691d313273ac38428914d8d35d857a7e1aefb2273596e4848e51348efdb9" Dec 11 08:31:00 crc kubenswrapper[4881]: I1211 08:31:00.104643 4881 scope.go:117] "RemoveContainer" containerID="1243c09fd069888f91da306add24aaec13c8fc7d4cb3cf19c8def68f99406992" Dec 11 08:31:00 crc kubenswrapper[4881]: I1211 08:31:00.104784 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jtkpv"] Dec 11 08:31:00 crc kubenswrapper[4881]: I1211 08:31:00.112639 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jtkpv"] Dec 11 08:31:01 crc kubenswrapper[4881]: I1211 08:31:01.013725 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="677c7915-14eb-4f19-adb1-6cc04c415161" path="/var/lib/kubelet/pods/677c7915-14eb-4f19-adb1-6cc04c415161/volumes" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.027030 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-6769fb99d-g9bf9"] Dec 11 08:31:03 crc kubenswrapper[4881]: E1211 08:31:03.027643 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="677c7915-14eb-4f19-adb1-6cc04c415161" containerName="extract-utilities" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.027665 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="677c7915-14eb-4f19-adb1-6cc04c415161" containerName="extract-utilities" Dec 11 08:31:03 crc kubenswrapper[4881]: E1211 08:31:03.027683 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d84ce93-6f0f-4248-a431-dd1692ff2ba8" containerName="extract" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.027690 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d84ce93-6f0f-4248-a431-dd1692ff2ba8" containerName="extract" Dec 11 08:31:03 crc kubenswrapper[4881]: E1211 08:31:03.027705 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d84ce93-6f0f-4248-a431-dd1692ff2ba8" containerName="util" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.027719 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d84ce93-6f0f-4248-a431-dd1692ff2ba8" containerName="util" Dec 11 08:31:03 crc kubenswrapper[4881]: E1211 08:31:03.027726 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d84ce93-6f0f-4248-a431-dd1692ff2ba8" containerName="pull" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.027733 
4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d84ce93-6f0f-4248-a431-dd1692ff2ba8" containerName="pull" Dec 11 08:31:03 crc kubenswrapper[4881]: E1211 08:31:03.027746 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="677c7915-14eb-4f19-adb1-6cc04c415161" containerName="extract-content" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.027752 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="677c7915-14eb-4f19-adb1-6cc04c415161" containerName="extract-content" Dec 11 08:31:03 crc kubenswrapper[4881]: E1211 08:31:03.027768 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="677c7915-14eb-4f19-adb1-6cc04c415161" containerName="registry-server" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.027774 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="677c7915-14eb-4f19-adb1-6cc04c415161" containerName="registry-server" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.027969 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="677c7915-14eb-4f19-adb1-6cc04c415161" containerName="registry-server" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.027991 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d84ce93-6f0f-4248-a431-dd1692ff2ba8" containerName="extract" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.028536 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-6769fb99d-g9bf9" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.032073 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.032401 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.033017 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-9zh8v" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.047298 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-6769fb99d-g9bf9"] Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.182882 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fsw5p\" (UniqueName: \"kubernetes.io/projected/ae9ad369-6e2e-4c6c-a12a-cf228edaa48c-kube-api-access-fsw5p\") pod \"nmstate-operator-6769fb99d-g9bf9\" (UID: \"ae9ad369-6e2e-4c6c-a12a-cf228edaa48c\") " pod="openshift-nmstate/nmstate-operator-6769fb99d-g9bf9" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.284557 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fsw5p\" (UniqueName: \"kubernetes.io/projected/ae9ad369-6e2e-4c6c-a12a-cf228edaa48c-kube-api-access-fsw5p\") pod \"nmstate-operator-6769fb99d-g9bf9\" (UID: \"ae9ad369-6e2e-4c6c-a12a-cf228edaa48c\") " pod="openshift-nmstate/nmstate-operator-6769fb99d-g9bf9" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.315283 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fsw5p\" (UniqueName: \"kubernetes.io/projected/ae9ad369-6e2e-4c6c-a12a-cf228edaa48c-kube-api-access-fsw5p\") pod \"nmstate-operator-6769fb99d-g9bf9\" (UID: \"ae9ad369-6e2e-4c6c-a12a-cf228edaa48c\") " pod="openshift-nmstate/nmstate-operator-6769fb99d-g9bf9" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.371680 4881 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-6769fb99d-g9bf9" Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.870259 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-6769fb99d-g9bf9"] Dec 11 08:31:03 crc kubenswrapper[4881]: I1211 08:31:03.883354 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 08:31:04 crc kubenswrapper[4881]: I1211 08:31:04.118916 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-6769fb99d-g9bf9" event={"ID":"ae9ad369-6e2e-4c6c-a12a-cf228edaa48c","Type":"ContainerStarted","Data":"de9123f178fdf192ba8ee57512852e02cfa0139b2b4a2d12710808c5845281df"} Dec 11 08:31:10 crc kubenswrapper[4881]: I1211 08:31:10.177987 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-6769fb99d-g9bf9" event={"ID":"ae9ad369-6e2e-4c6c-a12a-cf228edaa48c","Type":"ContainerStarted","Data":"edc7148cb4fce9f4560341a826049bbca7eff4f924813cb310a6a935c4bbe29f"} Dec 11 08:31:10 crc kubenswrapper[4881]: I1211 08:31:10.203635 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-6769fb99d-g9bf9" podStartSLOduration=1.659028051 podStartE2EDuration="7.20361795s" podCreationTimestamp="2025-12-11 08:31:03 +0000 UTC" firstStartedPulling="2025-12-11 08:31:03.883003136 +0000 UTC m=+912.260371843" lastFinishedPulling="2025-12-11 08:31:09.427593045 +0000 UTC m=+917.804961742" observedRunningTime="2025-12-11 08:31:10.201236511 +0000 UTC m=+918.578605208" watchObservedRunningTime="2025-12-11 08:31:10.20361795 +0000 UTC m=+918.580986647" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.539196 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f7f7578db-m5zx5"] Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.541034 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-m5zx5" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.543877 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-5dnc6" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.551270 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f7f7578db-m5zx5"] Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.561032 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-f8fb84555-mfvb7"] Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.562284 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-f8fb84555-mfvb7" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.568966 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.587310 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-qx6gc"] Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.588559 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-qx6gc" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.608302 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-f8fb84555-mfvb7"] Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.618577 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/dbbccd2c-ccc0-4501-b4b4-b85621051f5f-tls-key-pair\") pod \"nmstate-webhook-f8fb84555-mfvb7\" (UID: \"dbbccd2c-ccc0-4501-b4b4-b85621051f5f\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-mfvb7" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.618657 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kdvb\" (UniqueName: \"kubernetes.io/projected/dbbccd2c-ccc0-4501-b4b4-b85621051f5f-kube-api-access-8kdvb\") pod \"nmstate-webhook-f8fb84555-mfvb7\" (UID: \"dbbccd2c-ccc0-4501-b4b4-b85621051f5f\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-mfvb7" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.618702 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdqmx\" (UniqueName: \"kubernetes.io/projected/74e6048b-0d2d-418c-907c-5858077de213-kube-api-access-mdqmx\") pod \"nmstate-metrics-7f7f7578db-m5zx5\" (UID: \"74e6048b-0d2d-418c-907c-5858077de213\") " pod="openshift-nmstate/nmstate-metrics-7f7f7578db-m5zx5" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.720176 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdqmx\" (UniqueName: \"kubernetes.io/projected/74e6048b-0d2d-418c-907c-5858077de213-kube-api-access-mdqmx\") pod \"nmstate-metrics-7f7f7578db-m5zx5\" (UID: \"74e6048b-0d2d-418c-907c-5858077de213\") " pod="openshift-nmstate/nmstate-metrics-7f7f7578db-m5zx5" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.720258 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/3ef011ad-0329-41b6-89d0-bbe3c976576b-ovs-socket\") pod \"nmstate-handler-qx6gc\" (UID: \"3ef011ad-0329-41b6-89d0-bbe3c976576b\") " pod="openshift-nmstate/nmstate-handler-qx6gc" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.720294 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/3ef011ad-0329-41b6-89d0-bbe3c976576b-dbus-socket\") pod \"nmstate-handler-qx6gc\" (UID: \"3ef011ad-0329-41b6-89d0-bbe3c976576b\") " pod="openshift-nmstate/nmstate-handler-qx6gc" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.720415 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phj74\" (UniqueName: \"kubernetes.io/projected/3ef011ad-0329-41b6-89d0-bbe3c976576b-kube-api-access-phj74\") pod \"nmstate-handler-qx6gc\" (UID: \"3ef011ad-0329-41b6-89d0-bbe3c976576b\") " pod="openshift-nmstate/nmstate-handler-qx6gc" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.720444 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/dbbccd2c-ccc0-4501-b4b4-b85621051f5f-tls-key-pair\") pod \"nmstate-webhook-f8fb84555-mfvb7\" (UID: \"dbbccd2c-ccc0-4501-b4b4-b85621051f5f\") " 
pod="openshift-nmstate/nmstate-webhook-f8fb84555-mfvb7" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.720470 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kdvb\" (UniqueName: \"kubernetes.io/projected/dbbccd2c-ccc0-4501-b4b4-b85621051f5f-kube-api-access-8kdvb\") pod \"nmstate-webhook-f8fb84555-mfvb7\" (UID: \"dbbccd2c-ccc0-4501-b4b4-b85621051f5f\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-mfvb7" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.720484 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/3ef011ad-0329-41b6-89d0-bbe3c976576b-nmstate-lock\") pod \"nmstate-handler-qx6gc\" (UID: \"3ef011ad-0329-41b6-89d0-bbe3c976576b\") " pod="openshift-nmstate/nmstate-handler-qx6gc" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.740708 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/dbbccd2c-ccc0-4501-b4b4-b85621051f5f-tls-key-pair\") pod \"nmstate-webhook-f8fb84555-mfvb7\" (UID: \"dbbccd2c-ccc0-4501-b4b4-b85621051f5f\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-mfvb7" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.745957 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls"] Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.748759 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kdvb\" (UniqueName: \"kubernetes.io/projected/dbbccd2c-ccc0-4501-b4b4-b85621051f5f-kube-api-access-8kdvb\") pod \"nmstate-webhook-f8fb84555-mfvb7\" (UID: \"dbbccd2c-ccc0-4501-b4b4-b85621051f5f\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-mfvb7" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.750098 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.752985 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.753133 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-zpk5t" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.753244 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.766353 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdqmx\" (UniqueName: \"kubernetes.io/projected/74e6048b-0d2d-418c-907c-5858077de213-kube-api-access-mdqmx\") pod \"nmstate-metrics-7f7f7578db-m5zx5\" (UID: \"74e6048b-0d2d-418c-907c-5858077de213\") " pod="openshift-nmstate/nmstate-metrics-7f7f7578db-m5zx5" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.774806 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls"] Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.822897 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/3ef011ad-0329-41b6-89d0-bbe3c976576b-ovs-socket\") pod \"nmstate-handler-qx6gc\" (UID: \"3ef011ad-0329-41b6-89d0-bbe3c976576b\") " pod="openshift-nmstate/nmstate-handler-qx6gc" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.822765 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/3ef011ad-0329-41b6-89d0-bbe3c976576b-ovs-socket\") pod \"nmstate-handler-qx6gc\" (UID: \"3ef011ad-0329-41b6-89d0-bbe3c976576b\") " pod="openshift-nmstate/nmstate-handler-qx6gc" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.826730 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/3ef011ad-0329-41b6-89d0-bbe3c976576b-dbus-socket\") pod \"nmstate-handler-qx6gc\" (UID: \"3ef011ad-0329-41b6-89d0-bbe3c976576b\") " pod="openshift-nmstate/nmstate-handler-qx6gc" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.826805 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2rvg\" (UniqueName: \"kubernetes.io/projected/809482cd-c05d-41df-96db-84149e666743-kube-api-access-q2rvg\") pod \"nmstate-console-plugin-6ff7998486-nj8ls\" (UID: \"809482cd-c05d-41df-96db-84149e666743\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.826848 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/809482cd-c05d-41df-96db-84149e666743-nginx-conf\") pod \"nmstate-console-plugin-6ff7998486-nj8ls\" (UID: \"809482cd-c05d-41df-96db-84149e666743\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.826873 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/809482cd-c05d-41df-96db-84149e666743-plugin-serving-cert\") pod \"nmstate-console-plugin-6ff7998486-nj8ls\" (UID: 
\"809482cd-c05d-41df-96db-84149e666743\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.827611 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/3ef011ad-0329-41b6-89d0-bbe3c976576b-dbus-socket\") pod \"nmstate-handler-qx6gc\" (UID: \"3ef011ad-0329-41b6-89d0-bbe3c976576b\") " pod="openshift-nmstate/nmstate-handler-qx6gc" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.828077 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phj74\" (UniqueName: \"kubernetes.io/projected/3ef011ad-0329-41b6-89d0-bbe3c976576b-kube-api-access-phj74\") pod \"nmstate-handler-qx6gc\" (UID: \"3ef011ad-0329-41b6-89d0-bbe3c976576b\") " pod="openshift-nmstate/nmstate-handler-qx6gc" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.828178 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/3ef011ad-0329-41b6-89d0-bbe3c976576b-nmstate-lock\") pod \"nmstate-handler-qx6gc\" (UID: \"3ef011ad-0329-41b6-89d0-bbe3c976576b\") " pod="openshift-nmstate/nmstate-handler-qx6gc" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.828323 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/3ef011ad-0329-41b6-89d0-bbe3c976576b-nmstate-lock\") pod \"nmstate-handler-qx6gc\" (UID: \"3ef011ad-0329-41b6-89d0-bbe3c976576b\") " pod="openshift-nmstate/nmstate-handler-qx6gc" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.849616 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phj74\" (UniqueName: \"kubernetes.io/projected/3ef011ad-0329-41b6-89d0-bbe3c976576b-kube-api-access-phj74\") pod \"nmstate-handler-qx6gc\" (UID: \"3ef011ad-0329-41b6-89d0-bbe3c976576b\") " pod="openshift-nmstate/nmstate-handler-qx6gc" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.865737 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-m5zx5" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.879849 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-f8fb84555-mfvb7" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.911728 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-qx6gc" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.930006 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2rvg\" (UniqueName: \"kubernetes.io/projected/809482cd-c05d-41df-96db-84149e666743-kube-api-access-q2rvg\") pod \"nmstate-console-plugin-6ff7998486-nj8ls\" (UID: \"809482cd-c05d-41df-96db-84149e666743\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.930054 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/809482cd-c05d-41df-96db-84149e666743-plugin-serving-cert\") pod \"nmstate-console-plugin-6ff7998486-nj8ls\" (UID: \"809482cd-c05d-41df-96db-84149e666743\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.930076 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/809482cd-c05d-41df-96db-84149e666743-nginx-conf\") pod \"nmstate-console-plugin-6ff7998486-nj8ls\" (UID: \"809482cd-c05d-41df-96db-84149e666743\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls" Dec 11 08:31:11 crc kubenswrapper[4881]: E1211 08:31:11.930754 4881 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Dec 11 08:31:11 crc kubenswrapper[4881]: E1211 08:31:11.930886 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/809482cd-c05d-41df-96db-84149e666743-plugin-serving-cert podName:809482cd-c05d-41df-96db-84149e666743 nodeName:}" failed. No retries permitted until 2025-12-11 08:31:12.430826212 +0000 UTC m=+920.808194899 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/809482cd-c05d-41df-96db-84149e666743-plugin-serving-cert") pod "nmstate-console-plugin-6ff7998486-nj8ls" (UID: "809482cd-c05d-41df-96db-84149e666743") : secret "plugin-serving-cert" not found Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.931125 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/809482cd-c05d-41df-96db-84149e666743-nginx-conf\") pod \"nmstate-console-plugin-6ff7998486-nj8ls\" (UID: \"809482cd-c05d-41df-96db-84149e666743\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls" Dec 11 08:31:11 crc kubenswrapper[4881]: I1211 08:31:11.959103 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2rvg\" (UniqueName: \"kubernetes.io/projected/809482cd-c05d-41df-96db-84149e666743-kube-api-access-q2rvg\") pod \"nmstate-console-plugin-6ff7998486-nj8ls\" (UID: \"809482cd-c05d-41df-96db-84149e666743\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.018064 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-69ddbf8769-4wnph"] Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.018898 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.065186 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-69ddbf8769-4wnph"] Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.133354 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-config\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.133438 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-service-ca\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.133464 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-serving-cert\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.133502 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-oauth-serving-cert\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.133520 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slhgb\" (UniqueName: \"kubernetes.io/projected/05fbeafe-b228-42a0-8cfa-dc070853c0d7-kube-api-access-slhgb\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.133552 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-trusted-ca-bundle\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.133582 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-oauth-config\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.195521 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-qx6gc" event={"ID":"3ef011ad-0329-41b6-89d0-bbe3c976576b","Type":"ContainerStarted","Data":"9aedb9718756a8a3168b3ef4027ff995ca6dd0b896dd06029045a22b2f17ad02"} Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.234825 4881 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-config\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.234909 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-service-ca\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.234955 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-serving-cert\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.234984 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-oauth-serving-cert\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.235017 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slhgb\" (UniqueName: \"kubernetes.io/projected/05fbeafe-b228-42a0-8cfa-dc070853c0d7-kube-api-access-slhgb\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.235058 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-oauth-config\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.235078 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-trusted-ca-bundle\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.235814 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-service-ca\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.235828 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-config\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.236178 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-trusted-ca-bundle\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.236400 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-oauth-serving-cert\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.239026 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-oauth-config\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.239718 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-serving-cert\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.255353 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slhgb\" (UniqueName: \"kubernetes.io/projected/05fbeafe-b228-42a0-8cfa-dc070853c0d7-kube-api-access-slhgb\") pod \"console-69ddbf8769-4wnph\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") " pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.345453 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.440522 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/809482cd-c05d-41df-96db-84149e666743-plugin-serving-cert\") pod \"nmstate-console-plugin-6ff7998486-nj8ls\" (UID: \"809482cd-c05d-41df-96db-84149e666743\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.445033 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/809482cd-c05d-41df-96db-84149e666743-plugin-serving-cert\") pod \"nmstate-console-plugin-6ff7998486-nj8ls\" (UID: \"809482cd-c05d-41df-96db-84149e666743\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.477120 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f7f7578db-m5zx5"] Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.490685 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls" Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.538501 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-f8fb84555-mfvb7"] Dec 11 08:31:12 crc kubenswrapper[4881]: W1211 08:31:12.545663 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddbbccd2c_ccc0_4501_b4b4_b85621051f5f.slice/crio-a2677af6e1e3eee5814297958c6f72e0ad4cbbedf5453d441bfb78967f181a6c WatchSource:0}: Error finding container a2677af6e1e3eee5814297958c6f72e0ad4cbbedf5453d441bfb78967f181a6c: Status 404 returned error can't find the container with id a2677af6e1e3eee5814297958c6f72e0ad4cbbedf5453d441bfb78967f181a6c Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.791675 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-69ddbf8769-4wnph"] Dec 11 08:31:12 crc kubenswrapper[4881]: W1211 08:31:12.802758 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05fbeafe_b228_42a0_8cfa_dc070853c0d7.slice/crio-6cac496caddbe4caba5d4512edc18c308c126297080e4e7856fda80a28de0d48 WatchSource:0}: Error finding container 6cac496caddbe4caba5d4512edc18c308c126297080e4e7856fda80a28de0d48: Status 404 returned error can't find the container with id 6cac496caddbe4caba5d4512edc18c308c126297080e4e7856fda80a28de0d48 Dec 11 08:31:12 crc kubenswrapper[4881]: W1211 08:31:12.891321 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod809482cd_c05d_41df_96db_84149e666743.slice/crio-f0911053504a43ece04fa9e3d995dd9076a33cebbd71427fdd3ba0ef94eebc7d WatchSource:0}: Error finding container f0911053504a43ece04fa9e3d995dd9076a33cebbd71427fdd3ba0ef94eebc7d: Status 404 returned error can't find the container with id f0911053504a43ece04fa9e3d995dd9076a33cebbd71427fdd3ba0ef94eebc7d Dec 11 08:31:12 crc kubenswrapper[4881]: I1211 08:31:12.894218 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls"] Dec 11 08:31:13 crc kubenswrapper[4881]: I1211 08:31:13.205659 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls" event={"ID":"809482cd-c05d-41df-96db-84149e666743","Type":"ContainerStarted","Data":"f0911053504a43ece04fa9e3d995dd9076a33cebbd71427fdd3ba0ef94eebc7d"} Dec 11 08:31:13 crc kubenswrapper[4881]: I1211 08:31:13.207791 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-69ddbf8769-4wnph" event={"ID":"05fbeafe-b228-42a0-8cfa-dc070853c0d7","Type":"ContainerStarted","Data":"4e41a6248105052d2b5a161cd418325715f0a7ff208cc2c03c4b50e6266fb0db"} Dec 11 08:31:13 crc kubenswrapper[4881]: I1211 08:31:13.207836 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-69ddbf8769-4wnph" event={"ID":"05fbeafe-b228-42a0-8cfa-dc070853c0d7","Type":"ContainerStarted","Data":"6cac496caddbe4caba5d4512edc18c308c126297080e4e7856fda80a28de0d48"} Dec 11 08:31:13 crc kubenswrapper[4881]: I1211 08:31:13.210151 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-m5zx5" event={"ID":"74e6048b-0d2d-418c-907c-5858077de213","Type":"ContainerStarted","Data":"e9c605482a779a193d4e400c7a13597f95e24dd6bc83a1389cf3a904c7821120"} Dec 11 08:31:13 crc 
kubenswrapper[4881]: I1211 08:31:13.212189 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-f8fb84555-mfvb7" event={"ID":"dbbccd2c-ccc0-4501-b4b4-b85621051f5f","Type":"ContainerStarted","Data":"a2677af6e1e3eee5814297958c6f72e0ad4cbbedf5453d441bfb78967f181a6c"} Dec 11 08:31:13 crc kubenswrapper[4881]: I1211 08:31:13.227138 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-69ddbf8769-4wnph" podStartSLOduration=2.227113352 podStartE2EDuration="2.227113352s" podCreationTimestamp="2025-12-11 08:31:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:31:13.224145278 +0000 UTC m=+921.601513995" watchObservedRunningTime="2025-12-11 08:31:13.227113352 +0000 UTC m=+921.604482059" Dec 11 08:31:16 crc kubenswrapper[4881]: I1211 08:31:16.239992 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-qx6gc" event={"ID":"3ef011ad-0329-41b6-89d0-bbe3c976576b","Type":"ContainerStarted","Data":"a34b2ceddada205d0cc21cc3c295a027ff67fd980c99bf180b0f4142385e765d"} Dec 11 08:31:16 crc kubenswrapper[4881]: I1211 08:31:16.241694 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-qx6gc" Dec 11 08:31:16 crc kubenswrapper[4881]: I1211 08:31:16.245023 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-m5zx5" event={"ID":"74e6048b-0d2d-418c-907c-5858077de213","Type":"ContainerStarted","Data":"eef406d56174b0eca319a02dfb530fd2b02b3264aef177fe503ddeb3fbe1af34"} Dec 11 08:31:16 crc kubenswrapper[4881]: I1211 08:31:16.246837 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-f8fb84555-mfvb7" event={"ID":"dbbccd2c-ccc0-4501-b4b4-b85621051f5f","Type":"ContainerStarted","Data":"7405e2049c58a34da88a8790d0020b9b8b9de59aafc918726494c2173862dd75"} Dec 11 08:31:16 crc kubenswrapper[4881]: I1211 08:31:16.247092 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-f8fb84555-mfvb7" Dec 11 08:31:16 crc kubenswrapper[4881]: I1211 08:31:16.262804 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-qx6gc" podStartSLOduration=2.239520267 podStartE2EDuration="5.262781074s" podCreationTimestamp="2025-12-11 08:31:11 +0000 UTC" firstStartedPulling="2025-12-11 08:31:12.029299709 +0000 UTC m=+920.406668406" lastFinishedPulling="2025-12-11 08:31:15.052560516 +0000 UTC m=+923.429929213" observedRunningTime="2025-12-11 08:31:16.256576961 +0000 UTC m=+924.633945698" watchObservedRunningTime="2025-12-11 08:31:16.262781074 +0000 UTC m=+924.640149771" Dec 11 08:31:16 crc kubenswrapper[4881]: I1211 08:31:16.295126 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-f8fb84555-mfvb7" podStartSLOduration=2.786855064 podStartE2EDuration="5.295104724s" podCreationTimestamp="2025-12-11 08:31:11 +0000 UTC" firstStartedPulling="2025-12-11 08:31:12.549037452 +0000 UTC m=+920.926406149" lastFinishedPulling="2025-12-11 08:31:15.057287112 +0000 UTC m=+923.434655809" observedRunningTime="2025-12-11 08:31:16.28766411 +0000 UTC m=+924.665032807" watchObservedRunningTime="2025-12-11 08:31:16.295104724 +0000 UTC m=+924.672473421" Dec 11 08:31:17 crc kubenswrapper[4881]: I1211 08:31:17.259043 4881 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls" event={"ID":"809482cd-c05d-41df-96db-84149e666743","Type":"ContainerStarted","Data":"e0c42c892b0f274c087580ccc9950d67f46003ab814f27c066b874232942c33b"} Dec 11 08:31:17 crc kubenswrapper[4881]: I1211 08:31:17.282250 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-nj8ls" podStartSLOduration=3.016313361 podStartE2EDuration="6.282219662s" podCreationTimestamp="2025-12-11 08:31:11 +0000 UTC" firstStartedPulling="2025-12-11 08:31:12.893198948 +0000 UTC m=+921.270567665" lastFinishedPulling="2025-12-11 08:31:16.159105249 +0000 UTC m=+924.536473966" observedRunningTime="2025-12-11 08:31:17.27646793 +0000 UTC m=+925.653836657" watchObservedRunningTime="2025-12-11 08:31:17.282219662 +0000 UTC m=+925.659588369" Dec 11 08:31:18 crc kubenswrapper[4881]: I1211 08:31:18.269006 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-m5zx5" event={"ID":"74e6048b-0d2d-418c-907c-5858077de213","Type":"ContainerStarted","Data":"6a0ea07bde2e29566bd295eb37dcb203efb69f3372084f2cd5867e1bad870974"} Dec 11 08:31:18 crc kubenswrapper[4881]: I1211 08:31:18.297674 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-m5zx5" podStartSLOduration=2.320166413 podStartE2EDuration="7.29764716s" podCreationTimestamp="2025-12-11 08:31:11 +0000 UTC" firstStartedPulling="2025-12-11 08:31:12.509036491 +0000 UTC m=+920.886405198" lastFinishedPulling="2025-12-11 08:31:17.486517248 +0000 UTC m=+925.863885945" observedRunningTime="2025-12-11 08:31:18.295073877 +0000 UTC m=+926.672442574" watchObservedRunningTime="2025-12-11 08:31:18.29764716 +0000 UTC m=+926.675015887" Dec 11 08:31:21 crc kubenswrapper[4881]: I1211 08:31:21.939979 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-qx6gc" Dec 11 08:31:22 crc kubenswrapper[4881]: I1211 08:31:22.346633 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:22 crc kubenswrapper[4881]: I1211 08:31:22.346931 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:22 crc kubenswrapper[4881]: I1211 08:31:22.351284 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:23 crc kubenswrapper[4881]: I1211 08:31:23.322517 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-69ddbf8769-4wnph" Dec 11 08:31:23 crc kubenswrapper[4881]: I1211 08:31:23.396423 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-68c7b5cd9d-j9fr6"] Dec 11 08:31:31 crc kubenswrapper[4881]: I1211 08:31:31.886809 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-f8fb84555-mfvb7" Dec 11 08:31:48 crc kubenswrapper[4881]: I1211 08:31:48.443028 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-68c7b5cd9d-j9fr6" podUID="4d08bf9c-c409-4f74-afec-d566a07b6207" containerName="console" containerID="cri-o://e2c2ceae104d1983210b87b381f0457fc246b06bc8f3ca15bc6d4a0694433d72" gracePeriod=15 Dec 11 08:31:48 crc kubenswrapper[4881]: I1211 08:31:48.894078 
Dec 11 08:31:48 crc kubenswrapper[4881]: I1211 08:31:48.894418 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-68c7b5cd9d-j9fr6"
Dec 11 08:31:48 crc kubenswrapper[4881]: I1211 08:31:48.981979 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4d08bf9c-c409-4f74-afec-d566a07b6207-console-serving-cert\") pod \"4d08bf9c-c409-4f74-afec-d566a07b6207\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") "
Dec 11 08:31:48 crc kubenswrapper[4881]: I1211 08:31:48.982098 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4d08bf9c-c409-4f74-afec-d566a07b6207-console-oauth-config\") pod \"4d08bf9c-c409-4f74-afec-d566a07b6207\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") "
Dec 11 08:31:48 crc kubenswrapper[4881]: I1211 08:31:48.982211 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-trusted-ca-bundle\") pod \"4d08bf9c-c409-4f74-afec-d566a07b6207\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") "
Dec 11 08:31:48 crc kubenswrapper[4881]: I1211 08:31:48.982235 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-service-ca\") pod \"4d08bf9c-c409-4f74-afec-d566a07b6207\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") "
Dec 11 08:31:48 crc kubenswrapper[4881]: I1211 08:31:48.982266 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-console-config\") pod \"4d08bf9c-c409-4f74-afec-d566a07b6207\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") "
Dec 11 08:31:48 crc kubenswrapper[4881]: I1211 08:31:48.982300 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-md956\" (UniqueName: \"kubernetes.io/projected/4d08bf9c-c409-4f74-afec-d566a07b6207-kube-api-access-md956\") pod \"4d08bf9c-c409-4f74-afec-d566a07b6207\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") "
Dec 11 08:31:48 crc kubenswrapper[4881]: I1211 08:31:48.982328 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-oauth-serving-cert\") pod \"4d08bf9c-c409-4f74-afec-d566a07b6207\" (UID: \"4d08bf9c-c409-4f74-afec-d566a07b6207\") "
Dec 11 08:31:48 crc kubenswrapper[4881]: I1211 08:31:48.983299 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "4d08bf9c-c409-4f74-afec-d566a07b6207" (UID: "4d08bf9c-c409-4f74-afec-d566a07b6207"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:31:48 crc kubenswrapper[4881]: I1211 08:31:48.984767 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-service-ca" (OuterVolumeSpecName: "service-ca") pod "4d08bf9c-c409-4f74-afec-d566a07b6207" (UID: "4d08bf9c-c409-4f74-afec-d566a07b6207"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:31:48 crc kubenswrapper[4881]: I1211 08:31:48.990956 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-console-config" (OuterVolumeSpecName: "console-config") pod "4d08bf9c-c409-4f74-afec-d566a07b6207" (UID: "4d08bf9c-c409-4f74-afec-d566a07b6207"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:31:48 crc kubenswrapper[4881]: I1211 08:31:48.990951 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "4d08bf9c-c409-4f74-afec-d566a07b6207" (UID: "4d08bf9c-c409-4f74-afec-d566a07b6207"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:31:48 crc kubenswrapper[4881]: I1211 08:31:48.991317 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d08bf9c-c409-4f74-afec-d566a07b6207-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "4d08bf9c-c409-4f74-afec-d566a07b6207" (UID: "4d08bf9c-c409-4f74-afec-d566a07b6207"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:31:48 crc kubenswrapper[4881]: I1211 08:31:48.992641 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d08bf9c-c409-4f74-afec-d566a07b6207-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "4d08bf9c-c409-4f74-afec-d566a07b6207" (UID: "4d08bf9c-c409-4f74-afec-d566a07b6207"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:31:48 crc kubenswrapper[4881]: I1211 08:31:48.993152 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d08bf9c-c409-4f74-afec-d566a07b6207-kube-api-access-md956" (OuterVolumeSpecName: "kube-api-access-md956") pod "4d08bf9c-c409-4f74-afec-d566a07b6207" (UID: "4d08bf9c-c409-4f74-afec-d566a07b6207"). InnerVolumeSpecName "kube-api-access-md956". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.084293 4881 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.084358 4881 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-service-ca\") on node \"crc\" DevicePath \"\""
Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.084371 4881 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-console-config\") on node \"crc\" DevicePath \"\""
Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.084385 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-md956\" (UniqueName: \"kubernetes.io/projected/4d08bf9c-c409-4f74-afec-d566a07b6207-kube-api-access-md956\") on node \"crc\" DevicePath \"\""
Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.084397 4881 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4d08bf9c-c409-4f74-afec-d566a07b6207-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.084411 4881 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4d08bf9c-c409-4f74-afec-d566a07b6207-console-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.084420 4881 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4d08bf9c-c409-4f74-afec-d566a07b6207-console-oauth-config\") on node \"crc\" DevicePath \"\""
Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.208237 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7"]
Dec 11 08:31:49 crc kubenswrapper[4881]: E1211 08:31:49.208609 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d08bf9c-c409-4f74-afec-d566a07b6207" containerName="console"
Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.208629 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d08bf9c-c409-4f74-afec-d566a07b6207" containerName="console"
Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.208786 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d08bf9c-c409-4f74-afec-d566a07b6207" containerName="console"
Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.209754 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7"
Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.212736 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.236665 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7"] Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.287550 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-bundle\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7\" (UID: \"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.287680 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrght\" (UniqueName: \"kubernetes.io/projected/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-kube-api-access-nrght\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7\" (UID: \"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.287717 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-util\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7\" (UID: \"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.388786 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrght\" (UniqueName: \"kubernetes.io/projected/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-kube-api-access-nrght\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7\" (UID: \"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.388849 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-util\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7\" (UID: \"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.388892 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-bundle\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7\" (UID: \"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.389707 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-util\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7\" (UID: \"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.389731 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-bundle\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7\" (UID: \"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.410177 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrght\" (UniqueName: \"kubernetes.io/projected/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-kube-api-access-nrght\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7\" (UID: \"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.524443 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.532207 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-68c7b5cd9d-j9fr6_4d08bf9c-c409-4f74-afec-d566a07b6207/console/0.log" Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.532260 4881 generic.go:334] "Generic (PLEG): container finished" podID="4d08bf9c-c409-4f74-afec-d566a07b6207" containerID="e2c2ceae104d1983210b87b381f0457fc246b06bc8f3ca15bc6d4a0694433d72" exitCode=2 Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.532297 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-68c7b5cd9d-j9fr6" event={"ID":"4d08bf9c-c409-4f74-afec-d566a07b6207","Type":"ContainerDied","Data":"e2c2ceae104d1983210b87b381f0457fc246b06bc8f3ca15bc6d4a0694433d72"} Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.532320 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-68c7b5cd9d-j9fr6" event={"ID":"4d08bf9c-c409-4f74-afec-d566a07b6207","Type":"ContainerDied","Data":"87785d399aee96832fd21e716fe9a18beab4390b0a005fd6673b87ed75fccd58"} Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.532354 4881 scope.go:117] "RemoveContainer" containerID="e2c2ceae104d1983210b87b381f0457fc246b06bc8f3ca15bc6d4a0694433d72" Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.532364 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-68c7b5cd9d-j9fr6" Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.555394 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-68c7b5cd9d-j9fr6"] Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.568299 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-68c7b5cd9d-j9fr6"] Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.572806 4881 scope.go:117] "RemoveContainer" containerID="e2c2ceae104d1983210b87b381f0457fc246b06bc8f3ca15bc6d4a0694433d72" Dec 11 08:31:49 crc kubenswrapper[4881]: E1211 08:31:49.573740 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2c2ceae104d1983210b87b381f0457fc246b06bc8f3ca15bc6d4a0694433d72\": container with ID starting with e2c2ceae104d1983210b87b381f0457fc246b06bc8f3ca15bc6d4a0694433d72 not found: ID does not exist" containerID="e2c2ceae104d1983210b87b381f0457fc246b06bc8f3ca15bc6d4a0694433d72" Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.573797 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2c2ceae104d1983210b87b381f0457fc246b06bc8f3ca15bc6d4a0694433d72"} err="failed to get container status \"e2c2ceae104d1983210b87b381f0457fc246b06bc8f3ca15bc6d4a0694433d72\": rpc error: code = NotFound desc = could not find container \"e2c2ceae104d1983210b87b381f0457fc246b06bc8f3ca15bc6d4a0694433d72\": container with ID starting with e2c2ceae104d1983210b87b381f0457fc246b06bc8f3ca15bc6d4a0694433d72 not found: ID does not exist" Dec 11 08:31:49 crc kubenswrapper[4881]: I1211 08:31:49.968909 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7"] Dec 11 08:31:49 crc kubenswrapper[4881]: W1211 08:31:49.974849 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8a6e4a4a_8f04_4f2a_87bd_07f36e56a51e.slice/crio-c3e4f7ccc7df39743e6855afeee861ff89ed337a43631b4b926c3971db52ac4d WatchSource:0}: Error finding container c3e4f7ccc7df39743e6855afeee861ff89ed337a43631b4b926c3971db52ac4d: Status 404 returned error can't find the container with id c3e4f7ccc7df39743e6855afeee861ff89ed337a43631b4b926c3971db52ac4d Dec 11 08:31:50 crc kubenswrapper[4881]: I1211 08:31:50.545052 4881 generic.go:334] "Generic (PLEG): container finished" podID="8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e" containerID="b7c62a76410c47a4600eddcd50e2d56a5e9653a5b56aed09cadfe31eadc11e0d" exitCode=0 Dec 11 08:31:50 crc kubenswrapper[4881]: I1211 08:31:50.545134 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" event={"ID":"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e","Type":"ContainerDied","Data":"b7c62a76410c47a4600eddcd50e2d56a5e9653a5b56aed09cadfe31eadc11e0d"} Dec 11 08:31:50 crc kubenswrapper[4881]: I1211 08:31:50.545387 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" event={"ID":"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e","Type":"ContainerStarted","Data":"c3e4f7ccc7df39743e6855afeee861ff89ed337a43631b4b926c3971db52ac4d"} Dec 11 08:31:51 crc kubenswrapper[4881]: I1211 08:31:51.016041 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d08bf9c-c409-4f74-afec-d566a07b6207" 
path="/var/lib/kubelet/pods/4d08bf9c-c409-4f74-afec-d566a07b6207/volumes" Dec 11 08:31:53 crc kubenswrapper[4881]: I1211 08:31:53.564410 4881 generic.go:334] "Generic (PLEG): container finished" podID="8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e" containerID="03b45f81cc118702336632444456280d5cf92ac419e90b7e6b2160cca33d0aeb" exitCode=0 Dec 11 08:31:53 crc kubenswrapper[4881]: I1211 08:31:53.564512 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" event={"ID":"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e","Type":"ContainerDied","Data":"03b45f81cc118702336632444456280d5cf92ac419e90b7e6b2160cca33d0aeb"} Dec 11 08:31:54 crc kubenswrapper[4881]: I1211 08:31:54.573949 4881 generic.go:334] "Generic (PLEG): container finished" podID="8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e" containerID="a8c9c230148b506784f8db2ebd95ed3f6ab39b43912edf05471e7b586e96b2f4" exitCode=0 Dec 11 08:31:54 crc kubenswrapper[4881]: I1211 08:31:54.574120 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" event={"ID":"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e","Type":"ContainerDied","Data":"a8c9c230148b506784f8db2ebd95ed3f6ab39b43912edf05471e7b586e96b2f4"} Dec 11 08:31:55 crc kubenswrapper[4881]: I1211 08:31:55.894949 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" Dec 11 08:31:56 crc kubenswrapper[4881]: I1211 08:31:56.032221 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrght\" (UniqueName: \"kubernetes.io/projected/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-kube-api-access-nrght\") pod \"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e\" (UID: \"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e\") " Dec 11 08:31:56 crc kubenswrapper[4881]: I1211 08:31:56.032490 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-bundle\") pod \"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e\" (UID: \"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e\") " Dec 11 08:31:56 crc kubenswrapper[4881]: I1211 08:31:56.032556 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-util\") pod \"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e\" (UID: \"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e\") " Dec 11 08:31:56 crc kubenswrapper[4881]: I1211 08:31:56.033704 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-bundle" (OuterVolumeSpecName: "bundle") pod "8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e" (UID: "8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:31:56 crc kubenswrapper[4881]: I1211 08:31:56.054165 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-kube-api-access-nrght" (OuterVolumeSpecName: "kube-api-access-nrght") pod "8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e" (UID: "8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e"). InnerVolumeSpecName "kube-api-access-nrght". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:31:56 crc kubenswrapper[4881]: I1211 08:31:56.060690 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-util" (OuterVolumeSpecName: "util") pod "8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e" (UID: "8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:31:56 crc kubenswrapper[4881]: I1211 08:31:56.061820 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrght\" (UniqueName: \"kubernetes.io/projected/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-kube-api-access-nrght\") on node \"crc\" DevicePath \"\"" Dec 11 08:31:56 crc kubenswrapper[4881]: I1211 08:31:56.061840 4881 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:31:56 crc kubenswrapper[4881]: I1211 08:31:56.061850 4881 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e-util\") on node \"crc\" DevicePath \"\"" Dec 11 08:31:56 crc kubenswrapper[4881]: I1211 08:31:56.591247 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" event={"ID":"8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e","Type":"ContainerDied","Data":"c3e4f7ccc7df39743e6855afeee861ff89ed337a43631b4b926c3971db52ac4d"} Dec 11 08:31:56 crc kubenswrapper[4881]: I1211 08:31:56.591298 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c3e4f7ccc7df39743e6855afeee861ff89ed337a43631b4b926c3971db52ac4d" Dec 11 08:31:56 crc kubenswrapper[4881]: I1211 08:31:56.591365 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7" Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.687827 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4"] Dec 11 08:32:07 crc kubenswrapper[4881]: E1211 08:32:07.688900 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e" containerName="pull" Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.688916 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e" containerName="pull" Dec 11 08:32:07 crc kubenswrapper[4881]: E1211 08:32:07.688932 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e" containerName="util" Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.688938 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e" containerName="util" Dec 11 08:32:07 crc kubenswrapper[4881]: E1211 08:32:07.688955 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e" containerName="extract" Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.688960 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e" containerName="extract" Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.689179 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e" containerName="extract" Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.690411 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4" Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.695743 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.696114 4881 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-9cs72" Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.696264 4881 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.696342 4881 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.696521 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.703051 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4"] Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.763986 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fda9a059-2ee6-41ae-ad81-e4f694080990-webhook-cert\") pod \"metallb-operator-controller-manager-5646b5c6f5-clxl4\" (UID: \"fda9a059-2ee6-41ae-ad81-e4f694080990\") " pod="metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4" Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.764241 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.764498 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fda9a059-2ee6-41ae-ad81-e4f694080990-apiservice-cert\") pod \"metallb-operator-controller-manager-5646b5c6f5-clxl4\" (UID: \"fda9a059-2ee6-41ae-ad81-e4f694080990\") " pod="metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4"
Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.866368 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fda9a059-2ee6-41ae-ad81-e4f694080990-apiservice-cert\") pod \"metallb-operator-controller-manager-5646b5c6f5-clxl4\" (UID: \"fda9a059-2ee6-41ae-ad81-e4f694080990\") " pod="metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4"
Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.866441 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxm54\" (UniqueName: \"kubernetes.io/projected/fda9a059-2ee6-41ae-ad81-e4f694080990-kube-api-access-jxm54\") pod \"metallb-operator-controller-manager-5646b5c6f5-clxl4\" (UID: \"fda9a059-2ee6-41ae-ad81-e4f694080990\") " pod="metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4"
Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.866459 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fda9a059-2ee6-41ae-ad81-e4f694080990-webhook-cert\") pod \"metallb-operator-controller-manager-5646b5c6f5-clxl4\" (UID: \"fda9a059-2ee6-41ae-ad81-e4f694080990\") " pod="metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4"
Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.875533 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fda9a059-2ee6-41ae-ad81-e4f694080990-apiservice-cert\") pod \"metallb-operator-controller-manager-5646b5c6f5-clxl4\" (UID: \"fda9a059-2ee6-41ae-ad81-e4f694080990\") " pod="metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4"
Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.876398 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fda9a059-2ee6-41ae-ad81-e4f694080990-webhook-cert\") pod \"metallb-operator-controller-manager-5646b5c6f5-clxl4\" (UID: \"fda9a059-2ee6-41ae-ad81-e4f694080990\") " pod="metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4"
Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.888397 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxm54\" (UniqueName: \"kubernetes.io/projected/fda9a059-2ee6-41ae-ad81-e4f694080990-kube-api-access-jxm54\") pod \"metallb-operator-controller-manager-5646b5c6f5-clxl4\" (UID: \"fda9a059-2ee6-41ae-ad81-e4f694080990\") " pod="metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4"
Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.964277 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z"]
Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.965619 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z"
Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.968541 4881 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.968764 4881 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert"
Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.969272 4881 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-wsc9w"
Dec 11 08:32:07 crc kubenswrapper[4881]: I1211 08:32:07.981825 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z"]
Dec 11 08:32:08 crc kubenswrapper[4881]: I1211 08:32:08.017894 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4"
Dec 11 08:32:08 crc kubenswrapper[4881]: I1211 08:32:08.070952 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxhx6\" (UniqueName: \"kubernetes.io/projected/56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04-kube-api-access-bxhx6\") pod \"metallb-operator-webhook-server-65f54b9948-tf47z\" (UID: \"56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04\") " pod="metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z"
Dec 11 08:32:08 crc kubenswrapper[4881]: I1211 08:32:08.071074 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04-apiservice-cert\") pod \"metallb-operator-webhook-server-65f54b9948-tf47z\" (UID: \"56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04\") " pod="metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z"
Dec 11 08:32:08 crc kubenswrapper[4881]: I1211 08:32:08.071095 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04-webhook-cert\") pod \"metallb-operator-webhook-server-65f54b9948-tf47z\" (UID: \"56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04\") " pod="metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z"
Dec 11 08:32:08 crc kubenswrapper[4881]: I1211 08:32:08.172969 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04-apiservice-cert\") pod \"metallb-operator-webhook-server-65f54b9948-tf47z\" (UID: \"56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04\") " pod="metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z"
Dec 11 08:32:08 crc kubenswrapper[4881]: I1211 08:32:08.173013 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04-webhook-cert\") pod \"metallb-operator-webhook-server-65f54b9948-tf47z\" (UID: \"56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04\") " pod="metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z"
Dec 11 08:32:08 crc kubenswrapper[4881]: I1211 08:32:08.173087 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxhx6\" (UniqueName: \"kubernetes.io/projected/56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04-kube-api-access-bxhx6\") pod \"metallb-operator-webhook-server-65f54b9948-tf47z\" (UID: \"56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04\") " pod="metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z"
Dec 11 08:32:08 crc kubenswrapper[4881]: I1211 08:32:08.177703 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04-apiservice-cert\") pod \"metallb-operator-webhook-server-65f54b9948-tf47z\" (UID: \"56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04\") " pod="metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z"
Dec 11 08:32:08 crc kubenswrapper[4881]: I1211 08:32:08.178934 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04-webhook-cert\") pod \"metallb-operator-webhook-server-65f54b9948-tf47z\" (UID: \"56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04\") " pod="metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z"
Dec 11 08:32:08 crc kubenswrapper[4881]: I1211 08:32:08.193999 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxhx6\" (UniqueName: \"kubernetes.io/projected/56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04-kube-api-access-bxhx6\") pod \"metallb-operator-webhook-server-65f54b9948-tf47z\" (UID: \"56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04\") " pod="metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z"
Dec 11 08:32:08 crc kubenswrapper[4881]: I1211 08:32:08.284070 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z"
Dec 11 08:32:08 crc kubenswrapper[4881]: I1211 08:32:08.511928 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4"]
Dec 11 08:32:08 crc kubenswrapper[4881]: W1211 08:32:08.517805 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfda9a059_2ee6_41ae_ad81_e4f694080990.slice/crio-65b189c263e32b7399b62a3cde48d262bdd8c0394ef0e5308e7ca80ad11bd6ff WatchSource:0}: Error finding container 65b189c263e32b7399b62a3cde48d262bdd8c0394ef0e5308e7ca80ad11bd6ff: Status 404 returned error can't find the container with id 65b189c263e32b7399b62a3cde48d262bdd8c0394ef0e5308e7ca80ad11bd6ff
Dec 11 08:32:08 crc kubenswrapper[4881]: I1211 08:32:08.677657 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4" event={"ID":"fda9a059-2ee6-41ae-ad81-e4f694080990","Type":"ContainerStarted","Data":"65b189c263e32b7399b62a3cde48d262bdd8c0394ef0e5308e7ca80ad11bd6ff"}
Dec 11 08:32:08 crc kubenswrapper[4881]: I1211 08:32:08.729104 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z"]
Dec 11 08:32:08 crc kubenswrapper[4881]: W1211 08:32:08.732041 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod56dc9d55_54e9_44bc_9a4b_3a1eb32f4f04.slice/crio-124bcfc95c32d822236db27ab813c9c4eea7a4d494de2cc2955b3f2d45a1466e WatchSource:0}: Error finding container 124bcfc95c32d822236db27ab813c9c4eea7a4d494de2cc2955b3f2d45a1466e: Status 404 returned error can't find the container with id 124bcfc95c32d822236db27ab813c9c4eea7a4d494de2cc2955b3f2d45a1466e
Dec 11 08:32:09 crc kubenswrapper[4881]: I1211 08:32:09.117561 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-j4z2r"]
Dec 11 08:32:09 crc kubenswrapper[4881]: I1211 08:32:09.119751 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j4z2r"
Dec 11 08:32:09 crc kubenswrapper[4881]: I1211 08:32:09.128301 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j4z2r"]
Dec 11 08:32:09 crc kubenswrapper[4881]: I1211 08:32:09.188161 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5f8jx\" (UniqueName: \"kubernetes.io/projected/98200def-f0ee-4a8a-b6cd-3a4343b82618-kube-api-access-5f8jx\") pod \"certified-operators-j4z2r\" (UID: \"98200def-f0ee-4a8a-b6cd-3a4343b82618\") " pod="openshift-marketplace/certified-operators-j4z2r"
Dec 11 08:32:09 crc kubenswrapper[4881]: I1211 08:32:09.188520 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98200def-f0ee-4a8a-b6cd-3a4343b82618-catalog-content\") pod \"certified-operators-j4z2r\" (UID: \"98200def-f0ee-4a8a-b6cd-3a4343b82618\") " pod="openshift-marketplace/certified-operators-j4z2r"
Dec 11 08:32:09 crc kubenswrapper[4881]: I1211 08:32:09.188712 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98200def-f0ee-4a8a-b6cd-3a4343b82618-utilities\") pod \"certified-operators-j4z2r\" (UID: \"98200def-f0ee-4a8a-b6cd-3a4343b82618\") " pod="openshift-marketplace/certified-operators-j4z2r"
Dec 11 08:32:09 crc kubenswrapper[4881]: I1211 08:32:09.291000 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5f8jx\" (UniqueName: \"kubernetes.io/projected/98200def-f0ee-4a8a-b6cd-3a4343b82618-kube-api-access-5f8jx\") pod \"certified-operators-j4z2r\" (UID: \"98200def-f0ee-4a8a-b6cd-3a4343b82618\") " pod="openshift-marketplace/certified-operators-j4z2r"
Dec 11 08:32:09 crc kubenswrapper[4881]: I1211 08:32:09.291372 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98200def-f0ee-4a8a-b6cd-3a4343b82618-catalog-content\") pod \"certified-operators-j4z2r\" (UID: \"98200def-f0ee-4a8a-b6cd-3a4343b82618\") " pod="openshift-marketplace/certified-operators-j4z2r"
Dec 11 08:32:09 crc kubenswrapper[4881]: I1211 08:32:09.291520 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98200def-f0ee-4a8a-b6cd-3a4343b82618-utilities\") pod \"certified-operators-j4z2r\" (UID: \"98200def-f0ee-4a8a-b6cd-3a4343b82618\") " pod="openshift-marketplace/certified-operators-j4z2r"
Dec 11 08:32:09 crc kubenswrapper[4881]: I1211 08:32:09.292077 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98200def-f0ee-4a8a-b6cd-3a4343b82618-utilities\") pod \"certified-operators-j4z2r\" (UID: \"98200def-f0ee-4a8a-b6cd-3a4343b82618\") " pod="openshift-marketplace/certified-operators-j4z2r"
Dec 11 08:32:09 crc kubenswrapper[4881]: I1211 08:32:09.292190 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98200def-f0ee-4a8a-b6cd-3a4343b82618-catalog-content\") pod \"certified-operators-j4z2r\" (UID: \"98200def-f0ee-4a8a-b6cd-3a4343b82618\") " pod="openshift-marketplace/certified-operators-j4z2r"
Dec 11 08:32:09 crc kubenswrapper[4881]: I1211 08:32:09.310492 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5f8jx\" (UniqueName: \"kubernetes.io/projected/98200def-f0ee-4a8a-b6cd-3a4343b82618-kube-api-access-5f8jx\") pod \"certified-operators-j4z2r\" (UID: \"98200def-f0ee-4a8a-b6cd-3a4343b82618\") " pod="openshift-marketplace/certified-operators-j4z2r"
Dec 11 08:32:09 crc kubenswrapper[4881]: I1211 08:32:09.439491 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j4z2r"
Dec 11 08:32:09 crc kubenswrapper[4881]: I1211 08:32:09.694085 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z" event={"ID":"56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04","Type":"ContainerStarted","Data":"124bcfc95c32d822236db27ab813c9c4eea7a4d494de2cc2955b3f2d45a1466e"}
Dec 11 08:32:09 crc kubenswrapper[4881]: I1211 08:32:09.803569 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j4z2r"]
Dec 11 08:32:10 crc kubenswrapper[4881]: I1211 08:32:10.709131 4881 generic.go:334] "Generic (PLEG): container finished" podID="98200def-f0ee-4a8a-b6cd-3a4343b82618" containerID="63e864f09ad2fe6ed2ef12f5bdaeebaeaa12a5d21327683d4c041e7fe4bdd6dd" exitCode=0
Dec 11 08:32:10 crc kubenswrapper[4881]: I1211 08:32:10.709232 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4z2r" event={"ID":"98200def-f0ee-4a8a-b6cd-3a4343b82618","Type":"ContainerDied","Data":"63e864f09ad2fe6ed2ef12f5bdaeebaeaa12a5d21327683d4c041e7fe4bdd6dd"}
Dec 11 08:32:10 crc kubenswrapper[4881]: I1211 08:32:10.710069 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4z2r" event={"ID":"98200def-f0ee-4a8a-b6cd-3a4343b82618","Type":"ContainerStarted","Data":"223db6007f3cc521cbc7a7464593ebb94eddb014195bd1706b499a105cd762e5"}
Dec 11 08:32:14 crc kubenswrapper[4881]: I1211 08:32:14.751866 4881 generic.go:334] "Generic (PLEG): container finished" podID="98200def-f0ee-4a8a-b6cd-3a4343b82618" containerID="351a76ea361cda3d03fd2b93c401b309c6cc35e0bae018cfcd61f20a46b20132" exitCode=0
Dec 11 08:32:14 crc kubenswrapper[4881]: I1211 08:32:14.751990 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4z2r" event={"ID":"98200def-f0ee-4a8a-b6cd-3a4343b82618","Type":"ContainerDied","Data":"351a76ea361cda3d03fd2b93c401b309c6cc35e0bae018cfcd61f20a46b20132"}
Dec 11 08:32:14 crc kubenswrapper[4881]: I1211 08:32:14.754250 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z" event={"ID":"56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04","Type":"ContainerStarted","Data":"ac0435f89806b3c62c6feea889f8eb7c206398e171e8fd82d1711541be26cb30"}
Dec 11 08:32:14 crc kubenswrapper[4881]: I1211 08:32:14.754438 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z"
Dec 11 08:32:14 crc kubenswrapper[4881]: I1211 08:32:14.756050 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4" event={"ID":"fda9a059-2ee6-41ae-ad81-e4f694080990","Type":"ContainerStarted","Data":"ba24717e54cdb8c3d9e578f085dbc0e77a35029dc1dbbad127cf6f814ba65827"}
Dec 11 08:32:14 crc kubenswrapper[4881]: I1211 08:32:14.756278 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4"
Dec 11 08:32:14 crc kubenswrapper[4881]: I1211 08:32:14.806358 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4" podStartSLOduration=2.661574418 podStartE2EDuration="7.806315013s" podCreationTimestamp="2025-12-11 08:32:07 +0000 UTC" firstStartedPulling="2025-12-11 08:32:08.520003617 +0000 UTC m=+976.897372324" lastFinishedPulling="2025-12-11 08:32:13.664744222 +0000 UTC m=+982.042112919" observedRunningTime="2025-12-11 08:32:14.803575035 +0000 UTC m=+983.180943742" watchObservedRunningTime="2025-12-11 08:32:14.806315013 +0000 UTC m=+983.183683710"
Dec 11 08:32:14 crc kubenswrapper[4881]: I1211 08:32:14.828976 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z" podStartSLOduration=2.896640106 podStartE2EDuration="7.828954173s" podCreationTimestamp="2025-12-11 08:32:07 +0000 UTC" firstStartedPulling="2025-12-11 08:32:08.735314376 +0000 UTC m=+977.112683073" lastFinishedPulling="2025-12-11 08:32:13.667628443 +0000 UTC m=+982.044997140" observedRunningTime="2025-12-11 08:32:14.825289112 +0000 UTC m=+983.202657819" watchObservedRunningTime="2025-12-11 08:32:14.828954173 +0000 UTC m=+983.206322860"
Dec 11 08:32:15 crc kubenswrapper[4881]: I1211 08:32:15.766170 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4z2r" event={"ID":"98200def-f0ee-4a8a-b6cd-3a4343b82618","Type":"ContainerStarted","Data":"f849d304b025947aed6db834901cfa8d3100832eb1f85f902823cc615059808a"}
Dec 11 08:32:15 crc kubenswrapper[4881]: I1211 08:32:15.793115 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-j4z2r" podStartSLOduration=2.211080683 podStartE2EDuration="6.793097232s" podCreationTimestamp="2025-12-11 08:32:09 +0000 UTC" firstStartedPulling="2025-12-11 08:32:10.711188522 +0000 UTC m=+979.088557219" lastFinishedPulling="2025-12-11 08:32:15.293205071 +0000 UTC m=+983.670573768" observedRunningTime="2025-12-11 08:32:15.789737109 +0000 UTC m=+984.167105836" watchObservedRunningTime="2025-12-11 08:32:15.793097232 +0000 UTC m=+984.170465919"
Dec 11 08:32:19 crc kubenswrapper[4881]: I1211 08:32:19.440703 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-j4z2r"
Dec 11 08:32:19 crc kubenswrapper[4881]: I1211 08:32:19.441245 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-j4z2r"
Dec 11 08:32:19 crc kubenswrapper[4881]: I1211 08:32:19.479588 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-j4z2r"
Dec 11 08:32:28 crc kubenswrapper[4881]: I1211 08:32:28.288266 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-65f54b9948-tf47z"
Dec 11 08:32:29 crc kubenswrapper[4881]: I1211 08:32:29.397322 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 08:32:29 crc kubenswrapper[4881]: I1211 08:32:29.397769 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 08:32:29 crc kubenswrapper[4881]: I1211 08:32:29.495661 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-j4z2r"
Dec 11 08:32:29 crc kubenswrapper[4881]: I1211 08:32:29.543615 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j4z2r"]
Dec 11 08:32:29 crc kubenswrapper[4881]: I1211 08:32:29.870097 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-j4z2r" podUID="98200def-f0ee-4a8a-b6cd-3a4343b82618" containerName="registry-server" containerID="cri-o://f849d304b025947aed6db834901cfa8d3100832eb1f85f902823cc615059808a" gracePeriod=2
Dec 11 08:32:31 crc kubenswrapper[4881]: I1211 08:32:31.889500 4881 generic.go:334] "Generic (PLEG): container finished" podID="98200def-f0ee-4a8a-b6cd-3a4343b82618" containerID="f849d304b025947aed6db834901cfa8d3100832eb1f85f902823cc615059808a" exitCode=0
Dec 11 08:32:31 crc kubenswrapper[4881]: I1211 08:32:31.889553 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4z2r" event={"ID":"98200def-f0ee-4a8a-b6cd-3a4343b82618","Type":"ContainerDied","Data":"f849d304b025947aed6db834901cfa8d3100832eb1f85f902823cc615059808a"}
Dec 11 08:32:32 crc kubenswrapper[4881]: I1211 08:32:32.102248 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j4z2r"
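The machine-config-daemon liveness failure above is a refusal at the TCP layer: nothing was accepting on 127.0.0.1:8798 when the kubelet probed, so the GET to /health never happened. An HTTP probe passes on any response code from 200 up to (but not including) 400; a minimal sketch of the endpoint shape being probed, with the port and path copied from the log line and the handler itself purely illustrative:

    // Minimal /health endpoint of the shape the liveness probe above
    // expects; "connection refused" means this listener was absent.
    package main

    import (
    	"log"
    	"net/http"
    )

    func main() {
    	http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
    		w.WriteHeader(http.StatusOK) // any 2xx/3xx counts as probe success
    	})
    	log.Fatal(http.ListenAndServe("127.0.0.1:8798", nil))
    }

A single refused probe is not fatal; the kubelet only restarts the container after failureThreshold consecutive failures.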
Need to start a new one" pod="openshift-marketplace/certified-operators-j4z2r" Dec 11 08:32:32 crc kubenswrapper[4881]: I1211 08:32:32.214610 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98200def-f0ee-4a8a-b6cd-3a4343b82618-utilities\") pod \"98200def-f0ee-4a8a-b6cd-3a4343b82618\" (UID: \"98200def-f0ee-4a8a-b6cd-3a4343b82618\") " Dec 11 08:32:32 crc kubenswrapper[4881]: I1211 08:32:32.214793 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98200def-f0ee-4a8a-b6cd-3a4343b82618-catalog-content\") pod \"98200def-f0ee-4a8a-b6cd-3a4343b82618\" (UID: \"98200def-f0ee-4a8a-b6cd-3a4343b82618\") " Dec 11 08:32:32 crc kubenswrapper[4881]: I1211 08:32:32.214846 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5f8jx\" (UniqueName: \"kubernetes.io/projected/98200def-f0ee-4a8a-b6cd-3a4343b82618-kube-api-access-5f8jx\") pod \"98200def-f0ee-4a8a-b6cd-3a4343b82618\" (UID: \"98200def-f0ee-4a8a-b6cd-3a4343b82618\") " Dec 11 08:32:32 crc kubenswrapper[4881]: I1211 08:32:32.215770 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98200def-f0ee-4a8a-b6cd-3a4343b82618-utilities" (OuterVolumeSpecName: "utilities") pod "98200def-f0ee-4a8a-b6cd-3a4343b82618" (UID: "98200def-f0ee-4a8a-b6cd-3a4343b82618"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:32:32 crc kubenswrapper[4881]: I1211 08:32:32.221429 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98200def-f0ee-4a8a-b6cd-3a4343b82618-kube-api-access-5f8jx" (OuterVolumeSpecName: "kube-api-access-5f8jx") pod "98200def-f0ee-4a8a-b6cd-3a4343b82618" (UID: "98200def-f0ee-4a8a-b6cd-3a4343b82618"). InnerVolumeSpecName "kube-api-access-5f8jx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:32:32 crc kubenswrapper[4881]: I1211 08:32:32.265187 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98200def-f0ee-4a8a-b6cd-3a4343b82618-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "98200def-f0ee-4a8a-b6cd-3a4343b82618" (UID: "98200def-f0ee-4a8a-b6cd-3a4343b82618"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:32:32 crc kubenswrapper[4881]: I1211 08:32:32.316543 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98200def-f0ee-4a8a-b6cd-3a4343b82618-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:32:32 crc kubenswrapper[4881]: I1211 08:32:32.316582 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98200def-f0ee-4a8a-b6cd-3a4343b82618-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:32:32 crc kubenswrapper[4881]: I1211 08:32:32.316597 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5f8jx\" (UniqueName: \"kubernetes.io/projected/98200def-f0ee-4a8a-b6cd-3a4343b82618-kube-api-access-5f8jx\") on node \"crc\" DevicePath \"\"" Dec 11 08:32:32 crc kubenswrapper[4881]: I1211 08:32:32.900622 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4z2r" event={"ID":"98200def-f0ee-4a8a-b6cd-3a4343b82618","Type":"ContainerDied","Data":"223db6007f3cc521cbc7a7464593ebb94eddb014195bd1706b499a105cd762e5"} Dec 11 08:32:32 crc kubenswrapper[4881]: I1211 08:32:32.900910 4881 scope.go:117] "RemoveContainer" containerID="f849d304b025947aed6db834901cfa8d3100832eb1f85f902823cc615059808a" Dec 11 08:32:32 crc kubenswrapper[4881]: I1211 08:32:32.900708 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j4z2r" Dec 11 08:32:32 crc kubenswrapper[4881]: I1211 08:32:32.928506 4881 scope.go:117] "RemoveContainer" containerID="351a76ea361cda3d03fd2b93c401b309c6cc35e0bae018cfcd61f20a46b20132" Dec 11 08:32:32 crc kubenswrapper[4881]: I1211 08:32:32.937165 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j4z2r"] Dec 11 08:32:32 crc kubenswrapper[4881]: I1211 08:32:32.952873 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-j4z2r"] Dec 11 08:32:32 crc kubenswrapper[4881]: I1211 08:32:32.969143 4881 scope.go:117] "RemoveContainer" containerID="63e864f09ad2fe6ed2ef12f5bdaeebaeaa12a5d21327683d4c041e7fe4bdd6dd" Dec 11 08:32:33 crc kubenswrapper[4881]: I1211 08:32:33.016619 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98200def-f0ee-4a8a-b6cd-3a4343b82618" path="/var/lib/kubelet/pods/98200def-f0ee-4a8a-b6cd-3a4343b82618/volumes" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.020809 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.762290 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-xj6xk"] Dec 11 08:32:48 crc kubenswrapper[4881]: E1211 08:32:48.762689 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98200def-f0ee-4a8a-b6cd-3a4343b82618" containerName="extract-content" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.762720 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="98200def-f0ee-4a8a-b6cd-3a4343b82618" containerName="extract-content" Dec 11 08:32:48 crc kubenswrapper[4881]: E1211 08:32:48.762747 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98200def-f0ee-4a8a-b6cd-3a4343b82618" containerName="registry-server" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.762755 4881 
state_mem.go:107] "Deleted CPUSet assignment" podUID="98200def-f0ee-4a8a-b6cd-3a4343b82618" containerName="registry-server" Dec 11 08:32:48 crc kubenswrapper[4881]: E1211 08:32:48.762768 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98200def-f0ee-4a8a-b6cd-3a4343b82618" containerName="extract-utilities" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.762778 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="98200def-f0ee-4a8a-b6cd-3a4343b82618" containerName="extract-utilities" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.762943 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="98200def-f0ee-4a8a-b6cd-3a4343b82618" containerName="registry-server" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.770211 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.776362 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7784b6fcf-nkdkx"] Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.779388 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-nkdkx" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.779442 4881 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.780046 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.780144 4881 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-6xw2n" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.786382 4881 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.794979 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7784b6fcf-nkdkx"] Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.912313 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/30393d1e-8b58-4cb7-9e45-23b5b79f235e-metrics\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.912380 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/30393d1e-8b58-4cb7-9e45-23b5b79f235e-frr-conf\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.912409 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/52d03d01-bd10-4a71-993f-284fa256ebae-cert\") pod \"frr-k8s-webhook-server-7784b6fcf-nkdkx\" (UID: \"52d03d01-bd10-4a71-993f-284fa256ebae\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-nkdkx" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.912448 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzmmp\" (UniqueName: 
\"kubernetes.io/projected/30393d1e-8b58-4cb7-9e45-23b5b79f235e-kube-api-access-fzmmp\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.912503 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/30393d1e-8b58-4cb7-9e45-23b5b79f235e-frr-sockets\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.912529 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/30393d1e-8b58-4cb7-9e45-23b5b79f235e-frr-startup\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.912557 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/30393d1e-8b58-4cb7-9e45-23b5b79f235e-reloader\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.912581 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9fhj\" (UniqueName: \"kubernetes.io/projected/52d03d01-bd10-4a71-993f-284fa256ebae-kube-api-access-w9fhj\") pod \"frr-k8s-webhook-server-7784b6fcf-nkdkx\" (UID: \"52d03d01-bd10-4a71-993f-284fa256ebae\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-nkdkx" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.912753 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/30393d1e-8b58-4cb7-9e45-23b5b79f235e-metrics-certs\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.918922 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-x559l"] Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.920209 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-x559l" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.933581 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.933595 4881 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Dec 11 08:32:48 crc kubenswrapper[4881]: W1211 08:32:48.933595 4881 reflector.go:561] object-"metallb-system"/"metallb-memberlist": failed to list *v1.Secret: secrets "metallb-memberlist" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "metallb-system": no relationship found between node 'crc' and this object Dec 11 08:32:48 crc kubenswrapper[4881]: E1211 08:32:48.933763 4881 reflector.go:158] "Unhandled Error" err="object-\"metallb-system\"/\"metallb-memberlist\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"metallb-memberlist\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"metallb-system\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.933871 4881 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-xr24b" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.959431 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-5bddd4b946-mzlq2"] Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.960689 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-5bddd4b946-mzlq2" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.963563 4881 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Dec 11 08:32:48 crc kubenswrapper[4881]: I1211 08:32:48.983850 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-5bddd4b946-mzlq2"] Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.046572 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/30393d1e-8b58-4cb7-9e45-23b5b79f235e-reloader\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.046623 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9fhj\" (UniqueName: \"kubernetes.io/projected/52d03d01-bd10-4a71-993f-284fa256ebae-kube-api-access-w9fhj\") pod \"frr-k8s-webhook-server-7784b6fcf-nkdkx\" (UID: \"52d03d01-bd10-4a71-993f-284fa256ebae\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-nkdkx" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.046869 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3-metrics-certs\") pod \"speaker-x559l\" (UID: \"3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3\") " pod="metallb-system/speaker-x559l" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.046893 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/30393d1e-8b58-4cb7-9e45-23b5b79f235e-metrics-certs\") pod \"frr-k8s-xj6xk\" (UID: 
\"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.046937 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fn6gz\" (UniqueName: \"kubernetes.io/projected/3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3-kube-api-access-fn6gz\") pod \"speaker-x559l\" (UID: \"3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3\") " pod="metallb-system/speaker-x559l" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.046968 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/30393d1e-8b58-4cb7-9e45-23b5b79f235e-metrics\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.046991 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/30393d1e-8b58-4cb7-9e45-23b5b79f235e-frr-conf\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.047025 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/52d03d01-bd10-4a71-993f-284fa256ebae-cert\") pod \"frr-k8s-webhook-server-7784b6fcf-nkdkx\" (UID: \"52d03d01-bd10-4a71-993f-284fa256ebae\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-nkdkx" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.047047 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3-metallb-excludel2\") pod \"speaker-x559l\" (UID: \"3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3\") " pod="metallb-system/speaker-x559l" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.047087 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzmmp\" (UniqueName: \"kubernetes.io/projected/30393d1e-8b58-4cb7-9e45-23b5b79f235e-kube-api-access-fzmmp\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.047113 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/30393d1e-8b58-4cb7-9e45-23b5b79f235e-frr-sockets\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.047146 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/30393d1e-8b58-4cb7-9e45-23b5b79f235e-frr-startup\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.047174 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3-memberlist\") pod \"speaker-x559l\" (UID: \"3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3\") " pod="metallb-system/speaker-x559l" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.047598 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/30393d1e-8b58-4cb7-9e45-23b5b79f235e-reloader\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.047781 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/30393d1e-8b58-4cb7-9e45-23b5b79f235e-frr-sockets\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.048796 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/30393d1e-8b58-4cb7-9e45-23b5b79f235e-frr-conf\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.048843 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/30393d1e-8b58-4cb7-9e45-23b5b79f235e-metrics\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.060139 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/30393d1e-8b58-4cb7-9e45-23b5b79f235e-frr-startup\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.063021 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/30393d1e-8b58-4cb7-9e45-23b5b79f235e-metrics-certs\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.063028 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/52d03d01-bd10-4a71-993f-284fa256ebae-cert\") pod \"frr-k8s-webhook-server-7784b6fcf-nkdkx\" (UID: \"52d03d01-bd10-4a71-993f-284fa256ebae\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-nkdkx" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.070976 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9fhj\" (UniqueName: \"kubernetes.io/projected/52d03d01-bd10-4a71-993f-284fa256ebae-kube-api-access-w9fhj\") pod \"frr-k8s-webhook-server-7784b6fcf-nkdkx\" (UID: \"52d03d01-bd10-4a71-993f-284fa256ebae\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-nkdkx" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.074979 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzmmp\" (UniqueName: \"kubernetes.io/projected/30393d1e-8b58-4cb7-9e45-23b5b79f235e-kube-api-access-fzmmp\") pod \"frr-k8s-xj6xk\" (UID: \"30393d1e-8b58-4cb7-9e45-23b5b79f235e\") " pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.095762 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.112935 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-nkdkx" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.151209 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3-metallb-excludel2\") pod \"speaker-x559l\" (UID: \"3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3\") " pod="metallb-system/speaker-x559l" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.151268 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1f002df3-6d4a-4f05-8ef8-07bc16590076-metrics-certs\") pod \"controller-5bddd4b946-mzlq2\" (UID: \"1f002df3-6d4a-4f05-8ef8-07bc16590076\") " pod="metallb-system/controller-5bddd4b946-mzlq2" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.151363 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3-memberlist\") pod \"speaker-x559l\" (UID: \"3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3\") " pod="metallb-system/speaker-x559l" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.151409 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3-metrics-certs\") pod \"speaker-x559l\" (UID: \"3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3\") " pod="metallb-system/speaker-x559l" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.151432 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvq2x\" (UniqueName: \"kubernetes.io/projected/1f002df3-6d4a-4f05-8ef8-07bc16590076-kube-api-access-tvq2x\") pod \"controller-5bddd4b946-mzlq2\" (UID: \"1f002df3-6d4a-4f05-8ef8-07bc16590076\") " pod="metallb-system/controller-5bddd4b946-mzlq2" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.151459 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1f002df3-6d4a-4f05-8ef8-07bc16590076-cert\") pod \"controller-5bddd4b946-mzlq2\" (UID: \"1f002df3-6d4a-4f05-8ef8-07bc16590076\") " pod="metallb-system/controller-5bddd4b946-mzlq2" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.151539 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fn6gz\" (UniqueName: \"kubernetes.io/projected/3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3-kube-api-access-fn6gz\") pod \"speaker-x559l\" (UID: \"3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3\") " pod="metallb-system/speaker-x559l" Dec 11 08:32:49 crc kubenswrapper[4881]: E1211 08:32:49.152004 4881 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Dec 11 08:32:49 crc kubenswrapper[4881]: E1211 08:32:49.152137 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3-metrics-certs podName:3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3 nodeName:}" failed. No retries permitted until 2025-12-11 08:32:49.652053537 +0000 UTC m=+1018.029422234 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3-metrics-certs") pod "speaker-x559l" (UID: "3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3") : secret "speaker-certs-secret" not found Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.152174 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3-metallb-excludel2\") pod \"speaker-x559l\" (UID: \"3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3\") " pod="metallb-system/speaker-x559l" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.175936 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fn6gz\" (UniqueName: \"kubernetes.io/projected/3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3-kube-api-access-fn6gz\") pod \"speaker-x559l\" (UID: \"3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3\") " pod="metallb-system/speaker-x559l" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.253275 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvq2x\" (UniqueName: \"kubernetes.io/projected/1f002df3-6d4a-4f05-8ef8-07bc16590076-kube-api-access-tvq2x\") pod \"controller-5bddd4b946-mzlq2\" (UID: \"1f002df3-6d4a-4f05-8ef8-07bc16590076\") " pod="metallb-system/controller-5bddd4b946-mzlq2" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.253575 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1f002df3-6d4a-4f05-8ef8-07bc16590076-cert\") pod \"controller-5bddd4b946-mzlq2\" (UID: \"1f002df3-6d4a-4f05-8ef8-07bc16590076\") " pod="metallb-system/controller-5bddd4b946-mzlq2" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.254606 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1f002df3-6d4a-4f05-8ef8-07bc16590076-metrics-certs\") pod \"controller-5bddd4b946-mzlq2\" (UID: \"1f002df3-6d4a-4f05-8ef8-07bc16590076\") " pod="metallb-system/controller-5bddd4b946-mzlq2" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.257023 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1f002df3-6d4a-4f05-8ef8-07bc16590076-cert\") pod \"controller-5bddd4b946-mzlq2\" (UID: \"1f002df3-6d4a-4f05-8ef8-07bc16590076\") " pod="metallb-system/controller-5bddd4b946-mzlq2" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.259103 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1f002df3-6d4a-4f05-8ef8-07bc16590076-metrics-certs\") pod \"controller-5bddd4b946-mzlq2\" (UID: \"1f002df3-6d4a-4f05-8ef8-07bc16590076\") " pod="metallb-system/controller-5bddd4b946-mzlq2" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.271541 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvq2x\" (UniqueName: \"kubernetes.io/projected/1f002df3-6d4a-4f05-8ef8-07bc16590076-kube-api-access-tvq2x\") pod \"controller-5bddd4b946-mzlq2\" (UID: \"1f002df3-6d4a-4f05-8ef8-07bc16590076\") " pod="metallb-system/controller-5bddd4b946-mzlq2" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.278607 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-5bddd4b946-mzlq2" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.642627 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7784b6fcf-nkdkx"] Dec 11 08:32:49 crc kubenswrapper[4881]: W1211 08:32:49.645728 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod52d03d01_bd10_4a71_993f_284fa256ebae.slice/crio-2e7ea765d2744655d4d51e97d4e36e5623c2f48c22b7859875f698f0356017b9 WatchSource:0}: Error finding container 2e7ea765d2744655d4d51e97d4e36e5623c2f48c22b7859875f698f0356017b9: Status 404 returned error can't find the container with id 2e7ea765d2744655d4d51e97d4e36e5623c2f48c22b7859875f698f0356017b9 Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.660961 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3-metrics-certs\") pod \"speaker-x559l\" (UID: \"3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3\") " pod="metallb-system/speaker-x559l" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.667646 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3-metrics-certs\") pod \"speaker-x559l\" (UID: \"3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3\") " pod="metallb-system/speaker-x559l" Dec 11 08:32:49 crc kubenswrapper[4881]: W1211 08:32:49.729158 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1f002df3_6d4a_4f05_8ef8_07bc16590076.slice/crio-4deee65d8d97d6b59cb06a41e7bc528baec38ba167e53e1b8b1c29b94db76291 WatchSource:0}: Error finding container 4deee65d8d97d6b59cb06a41e7bc528baec38ba167e53e1b8b1c29b94db76291: Status 404 returned error can't find the container with id 4deee65d8d97d6b59cb06a41e7bc528baec38ba167e53e1b8b1c29b94db76291 Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.729290 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-5bddd4b946-mzlq2"] Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.973996 4881 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Dec 11 08:32:49 crc kubenswrapper[4881]: I1211 08:32:49.985296 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3-memberlist\") pod \"speaker-x559l\" (UID: \"3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3\") " pod="metallb-system/speaker-x559l" Dec 11 08:32:50 crc kubenswrapper[4881]: I1211 08:32:50.073701 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5bddd4b946-mzlq2" event={"ID":"1f002df3-6d4a-4f05-8ef8-07bc16590076","Type":"ContainerStarted","Data":"914ca35b8e64271f7a670374d63e5b2bfe8cfe46454bd993f31ad8ae479dc745"} Dec 11 08:32:50 crc kubenswrapper[4881]: I1211 08:32:50.073752 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5bddd4b946-mzlq2" event={"ID":"1f002df3-6d4a-4f05-8ef8-07bc16590076","Type":"ContainerStarted","Data":"fb2978304540c5eb52473a70dac62cd98a8e240e0a078c4e42f4107edf6f925a"} Dec 11 08:32:50 crc kubenswrapper[4881]: I1211 08:32:50.073768 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5bddd4b946-mzlq2" 
event={"ID":"1f002df3-6d4a-4f05-8ef8-07bc16590076","Type":"ContainerStarted","Data":"4deee65d8d97d6b59cb06a41e7bc528baec38ba167e53e1b8b1c29b94db76291"} Dec 11 08:32:50 crc kubenswrapper[4881]: I1211 08:32:50.073818 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-5bddd4b946-mzlq2" Dec 11 08:32:50 crc kubenswrapper[4881]: I1211 08:32:50.076361 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xj6xk" event={"ID":"30393d1e-8b58-4cb7-9e45-23b5b79f235e","Type":"ContainerStarted","Data":"6f88c8175026dcc7817c80b682c14f2442bef1ec5f532876ee5f6298d470dc5e"} Dec 11 08:32:50 crc kubenswrapper[4881]: I1211 08:32:50.077847 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-nkdkx" event={"ID":"52d03d01-bd10-4a71-993f-284fa256ebae","Type":"ContainerStarted","Data":"2e7ea765d2744655d4d51e97d4e36e5623c2f48c22b7859875f698f0356017b9"} Dec 11 08:32:50 crc kubenswrapper[4881]: I1211 08:32:50.104698 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-5bddd4b946-mzlq2" podStartSLOduration=2.104676781 podStartE2EDuration="2.104676781s" podCreationTimestamp="2025-12-11 08:32:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:32:50.101855851 +0000 UTC m=+1018.479224548" watchObservedRunningTime="2025-12-11 08:32:50.104676781 +0000 UTC m=+1018.482045478" Dec 11 08:32:50 crc kubenswrapper[4881]: I1211 08:32:50.132874 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-x559l" Dec 11 08:32:50 crc kubenswrapper[4881]: W1211 08:32:50.158417 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3a0e36ce_d2bd_4725_bc4a_e37ea057b5c3.slice/crio-8af096a0ebf8c806b0692a6d4f1aa9950a6be6a3b9b835c6b051daef61a99b20 WatchSource:0}: Error finding container 8af096a0ebf8c806b0692a6d4f1aa9950a6be6a3b9b835c6b051daef61a99b20: Status 404 returned error can't find the container with id 8af096a0ebf8c806b0692a6d4f1aa9950a6be6a3b9b835c6b051daef61a99b20 Dec 11 08:32:51 crc kubenswrapper[4881]: I1211 08:32:51.102278 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-x559l" event={"ID":"3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3","Type":"ContainerStarted","Data":"6fe8f31843acad6bb52aaafd06bfe7e25572c664601330dfc576dc937b52e4ed"} Dec 11 08:32:51 crc kubenswrapper[4881]: I1211 08:32:51.102643 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-x559l" event={"ID":"3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3","Type":"ContainerStarted","Data":"ed2af11603ce09f5ebddecc28d8a08c9d3f0351da3fe19fe9ab0b3b1db247b69"} Dec 11 08:32:51 crc kubenswrapper[4881]: I1211 08:32:51.102663 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-x559l" event={"ID":"3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3","Type":"ContainerStarted","Data":"8af096a0ebf8c806b0692a6d4f1aa9950a6be6a3b9b835c6b051daef61a99b20"} Dec 11 08:32:51 crc kubenswrapper[4881]: I1211 08:32:51.102873 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-x559l" Dec 11 08:32:51 crc kubenswrapper[4881]: I1211 08:32:51.129098 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-x559l" podStartSLOduration=3.129079382 
podStartE2EDuration="3.129079382s" podCreationTimestamp="2025-12-11 08:32:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:32:51.125246467 +0000 UTC m=+1019.502615164" watchObservedRunningTime="2025-12-11 08:32:51.129079382 +0000 UTC m=+1019.506448079" Dec 11 08:32:57 crc kubenswrapper[4881]: I1211 08:32:57.151840 4881 generic.go:334] "Generic (PLEG): container finished" podID="30393d1e-8b58-4cb7-9e45-23b5b79f235e" containerID="1f3a62def37d545f566d9af3bd3f256483c12cc5d966632686102d1504c16b08" exitCode=0 Dec 11 08:32:57 crc kubenswrapper[4881]: I1211 08:32:57.152314 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xj6xk" event={"ID":"30393d1e-8b58-4cb7-9e45-23b5b79f235e","Type":"ContainerDied","Data":"1f3a62def37d545f566d9af3bd3f256483c12cc5d966632686102d1504c16b08"} Dec 11 08:32:57 crc kubenswrapper[4881]: I1211 08:32:57.156523 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-nkdkx" event={"ID":"52d03d01-bd10-4a71-993f-284fa256ebae","Type":"ContainerStarted","Data":"04cb998730ef7695127f33fd0f56ec332b67e520ab85840541256c482fae6167"} Dec 11 08:32:57 crc kubenswrapper[4881]: I1211 08:32:57.156820 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-nkdkx" Dec 11 08:32:57 crc kubenswrapper[4881]: I1211 08:32:57.198382 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-nkdkx" podStartSLOduration=1.927287022 podStartE2EDuration="9.198362967s" podCreationTimestamp="2025-12-11 08:32:48 +0000 UTC" firstStartedPulling="2025-12-11 08:32:49.647840966 +0000 UTC m=+1018.025209673" lastFinishedPulling="2025-12-11 08:32:56.918916921 +0000 UTC m=+1025.296285618" observedRunningTime="2025-12-11 08:32:57.196880871 +0000 UTC m=+1025.574249568" watchObservedRunningTime="2025-12-11 08:32:57.198362967 +0000 UTC m=+1025.575731654" Dec 11 08:32:58 crc kubenswrapper[4881]: I1211 08:32:58.168111 4881 generic.go:334] "Generic (PLEG): container finished" podID="30393d1e-8b58-4cb7-9e45-23b5b79f235e" containerID="2e9c97803c0cb49c0c0dd75a46a100484979dd62e506034ee9d0886f3c7902c6" exitCode=0 Dec 11 08:32:58 crc kubenswrapper[4881]: I1211 08:32:58.168184 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xj6xk" event={"ID":"30393d1e-8b58-4cb7-9e45-23b5b79f235e","Type":"ContainerDied","Data":"2e9c97803c0cb49c0c0dd75a46a100484979dd62e506034ee9d0886f3c7902c6"} Dec 11 08:32:59 crc kubenswrapper[4881]: I1211 08:32:59.183211 4881 generic.go:334] "Generic (PLEG): container finished" podID="30393d1e-8b58-4cb7-9e45-23b5b79f235e" containerID="4a5280b8c78a1be89686d455c35591fffa5acb07adc7e1f46191b2b6cf310c8c" exitCode=0 Dec 11 08:32:59 crc kubenswrapper[4881]: I1211 08:32:59.183326 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xj6xk" event={"ID":"30393d1e-8b58-4cb7-9e45-23b5b79f235e","Type":"ContainerDied","Data":"4a5280b8c78a1be89686d455c35591fffa5acb07adc7e1f46191b2b6cf310c8c"} Dec 11 08:32:59 crc kubenswrapper[4881]: I1211 08:32:59.283468 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-5bddd4b946-mzlq2" Dec 11 08:32:59 crc kubenswrapper[4881]: I1211 08:32:59.397351 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:32:59 crc kubenswrapper[4881]: I1211 08:32:59.397491 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:33:00 crc kubenswrapper[4881]: I1211 08:33:00.136662 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-x559l" Dec 11 08:33:00 crc kubenswrapper[4881]: I1211 08:33:00.205722 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xj6xk" event={"ID":"30393d1e-8b58-4cb7-9e45-23b5b79f235e","Type":"ContainerStarted","Data":"c1f5a2c04105e0369d1b342b63bc6b472a652424734ad90966cbeddd9c59d489"} Dec 11 08:33:00 crc kubenswrapper[4881]: I1211 08:33:00.205800 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xj6xk" event={"ID":"30393d1e-8b58-4cb7-9e45-23b5b79f235e","Type":"ContainerStarted","Data":"f1b10547144e288156598eb5907c2dd3c0153194d178ecde121cf45c2ef7b165"} Dec 11 08:33:00 crc kubenswrapper[4881]: I1211 08:33:00.205823 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xj6xk" event={"ID":"30393d1e-8b58-4cb7-9e45-23b5b79f235e","Type":"ContainerStarted","Data":"8a4a056188942e22ddaf367328cc9aa5bc4befba2a94bad89a0a24d3ed5b30b0"} Dec 11 08:33:00 crc kubenswrapper[4881]: I1211 08:33:00.205944 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xj6xk" event={"ID":"30393d1e-8b58-4cb7-9e45-23b5b79f235e","Type":"ContainerStarted","Data":"72e373d88ce66bcc13bd7420052d0d6752d5e960e7fa22e3f4abb969cb352396"} Dec 11 08:33:00 crc kubenswrapper[4881]: I1211 08:33:00.205969 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xj6xk" event={"ID":"30393d1e-8b58-4cb7-9e45-23b5b79f235e","Type":"ContainerStarted","Data":"44ab8ed33ca48a8837a6437a7b6d8e2f1865176bb9089f56c8686bb3935ec058"} Dec 11 08:33:01 crc kubenswrapper[4881]: I1211 08:33:01.222905 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xj6xk" event={"ID":"30393d1e-8b58-4cb7-9e45-23b5b79f235e","Type":"ContainerStarted","Data":"c63247a1fa0e330930932530f7f6520040089fa3e6466044dae1f7cc5c5d16e3"} Dec 11 08:33:01 crc kubenswrapper[4881]: I1211 08:33:01.223409 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:33:01 crc kubenswrapper[4881]: I1211 08:33:01.247865 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-xj6xk" podStartSLOduration=5.709293194 podStartE2EDuration="13.247844078s" podCreationTimestamp="2025-12-11 08:32:48 +0000 UTC" firstStartedPulling="2025-12-11 08:32:49.362596447 +0000 UTC m=+1017.739965144" lastFinishedPulling="2025-12-11 08:32:56.901147331 +0000 UTC m=+1025.278516028" observedRunningTime="2025-12-11 08:33:01.247177221 +0000 UTC m=+1029.624545958" watchObservedRunningTime="2025-12-11 08:33:01.247844078 +0000 UTC m=+1029.625212785" Dec 11 08:33:03 crc kubenswrapper[4881]: I1211 08:33:03.025357 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-42zj2"] Dec 11 
08:33:03 crc kubenswrapper[4881]: I1211 08:33:03.026702 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-42zj2" Dec 11 08:33:03 crc kubenswrapper[4881]: I1211 08:33:03.028405 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Dec 11 08:33:03 crc kubenswrapper[4881]: I1211 08:33:03.029409 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-pmxqj" Dec 11 08:33:03 crc kubenswrapper[4881]: I1211 08:33:03.029696 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Dec 11 08:33:03 crc kubenswrapper[4881]: I1211 08:33:03.041092 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-42zj2"] Dec 11 08:33:03 crc kubenswrapper[4881]: I1211 08:33:03.106480 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4t8nd\" (UniqueName: \"kubernetes.io/projected/e8d44b6f-86e6-4542-b371-a7d846583b1c-kube-api-access-4t8nd\") pod \"openstack-operator-index-42zj2\" (UID: \"e8d44b6f-86e6-4542-b371-a7d846583b1c\") " pod="openstack-operators/openstack-operator-index-42zj2" Dec 11 08:33:03 crc kubenswrapper[4881]: I1211 08:33:03.207881 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4t8nd\" (UniqueName: \"kubernetes.io/projected/e8d44b6f-86e6-4542-b371-a7d846583b1c-kube-api-access-4t8nd\") pod \"openstack-operator-index-42zj2\" (UID: \"e8d44b6f-86e6-4542-b371-a7d846583b1c\") " pod="openstack-operators/openstack-operator-index-42zj2" Dec 11 08:33:03 crc kubenswrapper[4881]: I1211 08:33:03.225665 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4t8nd\" (UniqueName: \"kubernetes.io/projected/e8d44b6f-86e6-4542-b371-a7d846583b1c-kube-api-access-4t8nd\") pod \"openstack-operator-index-42zj2\" (UID: \"e8d44b6f-86e6-4542-b371-a7d846583b1c\") " pod="openstack-operators/openstack-operator-index-42zj2" Dec 11 08:33:03 crc kubenswrapper[4881]: I1211 08:33:03.362485 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-42zj2" Dec 11 08:33:03 crc kubenswrapper[4881]: I1211 08:33:03.680745 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-42zj2"] Dec 11 08:33:03 crc kubenswrapper[4881]: W1211 08:33:03.698808 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode8d44b6f_86e6_4542_b371_a7d846583b1c.slice/crio-f3f2e5386dac7c133aff78016022faf76190908ee4258a9575335ef864e0674d WatchSource:0}: Error finding container f3f2e5386dac7c133aff78016022faf76190908ee4258a9575335ef864e0674d: Status 404 returned error can't find the container with id f3f2e5386dac7c133aff78016022faf76190908ee4258a9575335ef864e0674d Dec 11 08:33:04 crc kubenswrapper[4881]: I1211 08:33:04.098198 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:33:04 crc kubenswrapper[4881]: I1211 08:33:04.160782 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:33:04 crc kubenswrapper[4881]: I1211 08:33:04.247391 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-42zj2" event={"ID":"e8d44b6f-86e6-4542-b371-a7d846583b1c","Type":"ContainerStarted","Data":"f3f2e5386dac7c133aff78016022faf76190908ee4258a9575335ef864e0674d"} Dec 11 08:33:06 crc kubenswrapper[4881]: I1211 08:33:06.409684 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-42zj2"] Dec 11 08:33:07 crc kubenswrapper[4881]: I1211 08:33:07.022226 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-pgrfm"] Dec 11 08:33:07 crc kubenswrapper[4881]: I1211 08:33:07.024628 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-pgrfm" Dec 11 08:33:07 crc kubenswrapper[4881]: I1211 08:33:07.052567 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-pgrfm"] Dec 11 08:33:07 crc kubenswrapper[4881]: I1211 08:33:07.093205 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnp6p\" (UniqueName: \"kubernetes.io/projected/c8209c30-f66b-4b47-a663-a0dac2ea36dd-kube-api-access-nnp6p\") pod \"openstack-operator-index-pgrfm\" (UID: \"c8209c30-f66b-4b47-a663-a0dac2ea36dd\") " pod="openstack-operators/openstack-operator-index-pgrfm" Dec 11 08:33:07 crc kubenswrapper[4881]: I1211 08:33:07.195569 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnp6p\" (UniqueName: \"kubernetes.io/projected/c8209c30-f66b-4b47-a663-a0dac2ea36dd-kube-api-access-nnp6p\") pod \"openstack-operator-index-pgrfm\" (UID: \"c8209c30-f66b-4b47-a663-a0dac2ea36dd\") " pod="openstack-operators/openstack-operator-index-pgrfm" Dec 11 08:33:07 crc kubenswrapper[4881]: I1211 08:33:07.226943 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nnp6p\" (UniqueName: \"kubernetes.io/projected/c8209c30-f66b-4b47-a663-a0dac2ea36dd-kube-api-access-nnp6p\") pod \"openstack-operator-index-pgrfm\" (UID: \"c8209c30-f66b-4b47-a663-a0dac2ea36dd\") " pod="openstack-operators/openstack-operator-index-pgrfm" Dec 11 08:33:07 crc kubenswrapper[4881]: I1211 08:33:07.282497 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-42zj2" event={"ID":"e8d44b6f-86e6-4542-b371-a7d846583b1c","Type":"ContainerStarted","Data":"4efdb55493ad78649dbe917b80101734fb534505a2f4fda4b2514d1a14444724"} Dec 11 08:33:07 crc kubenswrapper[4881]: I1211 08:33:07.282645 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-42zj2" podUID="e8d44b6f-86e6-4542-b371-a7d846583b1c" containerName="registry-server" containerID="cri-o://4efdb55493ad78649dbe917b80101734fb534505a2f4fda4b2514d1a14444724" gracePeriod=2 Dec 11 08:33:07 crc kubenswrapper[4881]: I1211 08:33:07.358554 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-pgrfm" Dec 11 08:33:07 crc kubenswrapper[4881]: I1211 08:33:07.829548 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-42zj2" Dec 11 08:33:07 crc kubenswrapper[4881]: W1211 08:33:07.889100 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc8209c30_f66b_4b47_a663_a0dac2ea36dd.slice/crio-31af9f713d3cb3c6566302bbb6488a4a7a8edbbd7910694a0f4b1a2ca887b449 WatchSource:0}: Error finding container 31af9f713d3cb3c6566302bbb6488a4a7a8edbbd7910694a0f4b1a2ca887b449: Status 404 returned error can't find the container with id 31af9f713d3cb3c6566302bbb6488a4a7a8edbbd7910694a0f4b1a2ca887b449 Dec 11 08:33:07 crc kubenswrapper[4881]: I1211 08:33:07.889514 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-pgrfm"] Dec 11 08:33:07 crc kubenswrapper[4881]: I1211 08:33:07.917779 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4t8nd\" (UniqueName: \"kubernetes.io/projected/e8d44b6f-86e6-4542-b371-a7d846583b1c-kube-api-access-4t8nd\") pod \"e8d44b6f-86e6-4542-b371-a7d846583b1c\" (UID: \"e8d44b6f-86e6-4542-b371-a7d846583b1c\") " Dec 11 08:33:07 crc kubenswrapper[4881]: I1211 08:33:07.922866 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8d44b6f-86e6-4542-b371-a7d846583b1c-kube-api-access-4t8nd" (OuterVolumeSpecName: "kube-api-access-4t8nd") pod "e8d44b6f-86e6-4542-b371-a7d846583b1c" (UID: "e8d44b6f-86e6-4542-b371-a7d846583b1c"). InnerVolumeSpecName "kube-api-access-4t8nd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:33:08 crc kubenswrapper[4881]: I1211 08:33:08.019436 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4t8nd\" (UniqueName: \"kubernetes.io/projected/e8d44b6f-86e6-4542-b371-a7d846583b1c-kube-api-access-4t8nd\") on node \"crc\" DevicePath \"\"" Dec 11 08:33:08 crc kubenswrapper[4881]: I1211 08:33:08.291789 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-pgrfm" event={"ID":"c8209c30-f66b-4b47-a663-a0dac2ea36dd","Type":"ContainerStarted","Data":"2c4ae59b88350c61c299f7a9165870c661ba8002125d6680f7165def6255df8d"} Dec 11 08:33:08 crc kubenswrapper[4881]: I1211 08:33:08.291861 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-pgrfm" event={"ID":"c8209c30-f66b-4b47-a663-a0dac2ea36dd","Type":"ContainerStarted","Data":"31af9f713d3cb3c6566302bbb6488a4a7a8edbbd7910694a0f4b1a2ca887b449"} Dec 11 08:33:08 crc kubenswrapper[4881]: I1211 08:33:08.294015 4881 generic.go:334] "Generic (PLEG): container finished" podID="e8d44b6f-86e6-4542-b371-a7d846583b1c" containerID="4efdb55493ad78649dbe917b80101734fb534505a2f4fda4b2514d1a14444724" exitCode=0 Dec 11 08:33:08 crc kubenswrapper[4881]: I1211 08:33:08.294075 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-42zj2" event={"ID":"e8d44b6f-86e6-4542-b371-a7d846583b1c","Type":"ContainerDied","Data":"4efdb55493ad78649dbe917b80101734fb534505a2f4fda4b2514d1a14444724"} Dec 11 08:33:08 crc kubenswrapper[4881]: I1211 08:33:08.294100 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-42zj2" event={"ID":"e8d44b6f-86e6-4542-b371-a7d846583b1c","Type":"ContainerDied","Data":"f3f2e5386dac7c133aff78016022faf76190908ee4258a9575335ef864e0674d"} Dec 11 08:33:08 crc kubenswrapper[4881]: I1211 08:33:08.294125 4881 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-42zj2" Dec 11 08:33:08 crc kubenswrapper[4881]: I1211 08:33:08.294121 4881 scope.go:117] "RemoveContainer" containerID="4efdb55493ad78649dbe917b80101734fb534505a2f4fda4b2514d1a14444724" Dec 11 08:33:08 crc kubenswrapper[4881]: I1211 08:33:08.309652 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-pgrfm" podStartSLOduration=2.261473982 podStartE2EDuration="2.309627843s" podCreationTimestamp="2025-12-11 08:33:06 +0000 UTC" firstStartedPulling="2025-12-11 08:33:07.895300141 +0000 UTC m=+1036.272668838" lastFinishedPulling="2025-12-11 08:33:07.943454002 +0000 UTC m=+1036.320822699" observedRunningTime="2025-12-11 08:33:08.307746417 +0000 UTC m=+1036.685115144" watchObservedRunningTime="2025-12-11 08:33:08.309627843 +0000 UTC m=+1036.686996550" Dec 11 08:33:08 crc kubenswrapper[4881]: I1211 08:33:08.324608 4881 scope.go:117] "RemoveContainer" containerID="4efdb55493ad78649dbe917b80101734fb534505a2f4fda4b2514d1a14444724" Dec 11 08:33:08 crc kubenswrapper[4881]: E1211 08:33:08.325290 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4efdb55493ad78649dbe917b80101734fb534505a2f4fda4b2514d1a14444724\": container with ID starting with 4efdb55493ad78649dbe917b80101734fb534505a2f4fda4b2514d1a14444724 not found: ID does not exist" containerID="4efdb55493ad78649dbe917b80101734fb534505a2f4fda4b2514d1a14444724" Dec 11 08:33:08 crc kubenswrapper[4881]: I1211 08:33:08.325366 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4efdb55493ad78649dbe917b80101734fb534505a2f4fda4b2514d1a14444724"} err="failed to get container status \"4efdb55493ad78649dbe917b80101734fb534505a2f4fda4b2514d1a14444724\": rpc error: code = NotFound desc = could not find container \"4efdb55493ad78649dbe917b80101734fb534505a2f4fda4b2514d1a14444724\": container with ID starting with 4efdb55493ad78649dbe917b80101734fb534505a2f4fda4b2514d1a14444724 not found: ID does not exist" Dec 11 08:33:08 crc kubenswrapper[4881]: I1211 08:33:08.334252 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-42zj2"] Dec 11 08:33:08 crc kubenswrapper[4881]: I1211 08:33:08.343684 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-42zj2"] Dec 11 08:33:09 crc kubenswrapper[4881]: I1211 08:33:09.018690 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8d44b6f-86e6-4542-b371-a7d846583b1c" path="/var/lib/kubelet/pods/e8d44b6f-86e6-4542-b371-a7d846583b1c/volumes" Dec 11 08:33:09 crc kubenswrapper[4881]: I1211 08:33:09.103402 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-xj6xk" Dec 11 08:33:09 crc kubenswrapper[4881]: I1211 08:33:09.120689 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-nkdkx" Dec 11 08:33:17 crc kubenswrapper[4881]: I1211 08:33:17.359478 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-pgrfm" Dec 11 08:33:17 crc kubenswrapper[4881]: I1211 08:33:17.361369 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-pgrfm" Dec 11 08:33:17 crc 
kubenswrapper[4881]: I1211 08:33:17.390673 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-pgrfm" Dec 11 08:33:17 crc kubenswrapper[4881]: I1211 08:33:17.427667 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-pgrfm" Dec 11 08:33:18 crc kubenswrapper[4881]: I1211 08:33:18.710650 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5"] Dec 11 08:33:18 crc kubenswrapper[4881]: E1211 08:33:18.711323 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8d44b6f-86e6-4542-b371-a7d846583b1c" containerName="registry-server" Dec 11 08:33:18 crc kubenswrapper[4881]: I1211 08:33:18.711353 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8d44b6f-86e6-4542-b371-a7d846583b1c" containerName="registry-server" Dec 11 08:33:18 crc kubenswrapper[4881]: I1211 08:33:18.711589 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8d44b6f-86e6-4542-b371-a7d846583b1c" containerName="registry-server" Dec 11 08:33:18 crc kubenswrapper[4881]: I1211 08:33:18.712743 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" Dec 11 08:33:18 crc kubenswrapper[4881]: I1211 08:33:18.716385 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-nffbq" Dec 11 08:33:18 crc kubenswrapper[4881]: I1211 08:33:18.726425 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5"] Dec 11 08:33:18 crc kubenswrapper[4881]: I1211 08:33:18.864663 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-util\") pod \"b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5\" (UID: \"63d42923-a2ed-4b92-82ab-e9ca4ad98e55\") " pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" Dec 11 08:33:18 crc kubenswrapper[4881]: I1211 08:33:18.864843 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wtxh\" (UniqueName: \"kubernetes.io/projected/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-kube-api-access-8wtxh\") pod \"b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5\" (UID: \"63d42923-a2ed-4b92-82ab-e9ca4ad98e55\") " pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" Dec 11 08:33:18 crc kubenswrapper[4881]: I1211 08:33:18.865025 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-bundle\") pod \"b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5\" (UID: \"63d42923-a2ed-4b92-82ab-e9ca4ad98e55\") " pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" Dec 11 08:33:18 crc kubenswrapper[4881]: I1211 08:33:18.967359 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-bundle\") pod \"b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5\" (UID: 
\"63d42923-a2ed-4b92-82ab-e9ca4ad98e55\") " pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" Dec 11 08:33:18 crc kubenswrapper[4881]: I1211 08:33:18.967500 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-util\") pod \"b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5\" (UID: \"63d42923-a2ed-4b92-82ab-e9ca4ad98e55\") " pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" Dec 11 08:33:18 crc kubenswrapper[4881]: I1211 08:33:18.967551 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wtxh\" (UniqueName: \"kubernetes.io/projected/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-kube-api-access-8wtxh\") pod \"b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5\" (UID: \"63d42923-a2ed-4b92-82ab-e9ca4ad98e55\") " pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" Dec 11 08:33:18 crc kubenswrapper[4881]: I1211 08:33:18.967985 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-bundle\") pod \"b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5\" (UID: \"63d42923-a2ed-4b92-82ab-e9ca4ad98e55\") " pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" Dec 11 08:33:18 crc kubenswrapper[4881]: I1211 08:33:18.968073 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-util\") pod \"b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5\" (UID: \"63d42923-a2ed-4b92-82ab-e9ca4ad98e55\") " pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" Dec 11 08:33:19 crc kubenswrapper[4881]: I1211 08:33:19.000438 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wtxh\" (UniqueName: \"kubernetes.io/projected/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-kube-api-access-8wtxh\") pod \"b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5\" (UID: \"63d42923-a2ed-4b92-82ab-e9ca4ad98e55\") " pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" Dec 11 08:33:19 crc kubenswrapper[4881]: I1211 08:33:19.062725 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" Dec 11 08:33:19 crc kubenswrapper[4881]: I1211 08:33:19.535074 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5"] Dec 11 08:33:20 crc kubenswrapper[4881]: I1211 08:33:20.427639 4881 generic.go:334] "Generic (PLEG): container finished" podID="63d42923-a2ed-4b92-82ab-e9ca4ad98e55" containerID="4ee922b809653379cb600874909433370ba7c35668554d5d89382d22dd590a62" exitCode=0 Dec 11 08:33:20 crc kubenswrapper[4881]: I1211 08:33:20.427704 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" event={"ID":"63d42923-a2ed-4b92-82ab-e9ca4ad98e55","Type":"ContainerDied","Data":"4ee922b809653379cb600874909433370ba7c35668554d5d89382d22dd590a62"} Dec 11 08:33:20 crc kubenswrapper[4881]: I1211 08:33:20.428041 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" event={"ID":"63d42923-a2ed-4b92-82ab-e9ca4ad98e55","Type":"ContainerStarted","Data":"663d761e9376b71227a4b165b60043bccfae2a1e017f74d8ea818c81898924d2"} Dec 11 08:33:21 crc kubenswrapper[4881]: I1211 08:33:21.444835 4881 generic.go:334] "Generic (PLEG): container finished" podID="63d42923-a2ed-4b92-82ab-e9ca4ad98e55" containerID="226e37ac6efe38feb2ad9231cab5b65230ba8bac12768fcf3d6136a1a235f0c2" exitCode=0 Dec 11 08:33:21 crc kubenswrapper[4881]: I1211 08:33:21.444876 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" event={"ID":"63d42923-a2ed-4b92-82ab-e9ca4ad98e55","Type":"ContainerDied","Data":"226e37ac6efe38feb2ad9231cab5b65230ba8bac12768fcf3d6136a1a235f0c2"} Dec 11 08:33:22 crc kubenswrapper[4881]: I1211 08:33:22.453409 4881 generic.go:334] "Generic (PLEG): container finished" podID="63d42923-a2ed-4b92-82ab-e9ca4ad98e55" containerID="a996cd3334faefbaba8457809c9df9260b66fc3ffe8d9fc8340ef77652e1775d" exitCode=0 Dec 11 08:33:22 crc kubenswrapper[4881]: I1211 08:33:22.453489 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" event={"ID":"63d42923-a2ed-4b92-82ab-e9ca4ad98e55","Type":"ContainerDied","Data":"a996cd3334faefbaba8457809c9df9260b66fc3ffe8d9fc8340ef77652e1775d"} Dec 11 08:33:23 crc kubenswrapper[4881]: I1211 08:33:23.892946 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" Dec 11 08:33:23 crc kubenswrapper[4881]: I1211 08:33:23.952640 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-util\") pod \"63d42923-a2ed-4b92-82ab-e9ca4ad98e55\" (UID: \"63d42923-a2ed-4b92-82ab-e9ca4ad98e55\") " Dec 11 08:33:23 crc kubenswrapper[4881]: I1211 08:33:23.952837 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8wtxh\" (UniqueName: \"kubernetes.io/projected/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-kube-api-access-8wtxh\") pod \"63d42923-a2ed-4b92-82ab-e9ca4ad98e55\" (UID: \"63d42923-a2ed-4b92-82ab-e9ca4ad98e55\") " Dec 11 08:33:23 crc kubenswrapper[4881]: I1211 08:33:23.952867 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-bundle\") pod \"63d42923-a2ed-4b92-82ab-e9ca4ad98e55\" (UID: \"63d42923-a2ed-4b92-82ab-e9ca4ad98e55\") " Dec 11 08:33:23 crc kubenswrapper[4881]: I1211 08:33:23.953585 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-bundle" (OuterVolumeSpecName: "bundle") pod "63d42923-a2ed-4b92-82ab-e9ca4ad98e55" (UID: "63d42923-a2ed-4b92-82ab-e9ca4ad98e55"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:33:23 crc kubenswrapper[4881]: I1211 08:33:23.954052 4881 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:33:23 crc kubenswrapper[4881]: I1211 08:33:23.962187 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-kube-api-access-8wtxh" (OuterVolumeSpecName: "kube-api-access-8wtxh") pod "63d42923-a2ed-4b92-82ab-e9ca4ad98e55" (UID: "63d42923-a2ed-4b92-82ab-e9ca4ad98e55"). InnerVolumeSpecName "kube-api-access-8wtxh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:33:23 crc kubenswrapper[4881]: I1211 08:33:23.967474 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-util" (OuterVolumeSpecName: "util") pod "63d42923-a2ed-4b92-82ab-e9ca4ad98e55" (UID: "63d42923-a2ed-4b92-82ab-e9ca4ad98e55"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:33:24 crc kubenswrapper[4881]: I1211 08:33:24.055622 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8wtxh\" (UniqueName: \"kubernetes.io/projected/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-kube-api-access-8wtxh\") on node \"crc\" DevicePath \"\"" Dec 11 08:33:24 crc kubenswrapper[4881]: I1211 08:33:24.055678 4881 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/63d42923-a2ed-4b92-82ab-e9ca4ad98e55-util\") on node \"crc\" DevicePath \"\"" Dec 11 08:33:24 crc kubenswrapper[4881]: I1211 08:33:24.473766 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" event={"ID":"63d42923-a2ed-4b92-82ab-e9ca4ad98e55","Type":"ContainerDied","Data":"663d761e9376b71227a4b165b60043bccfae2a1e017f74d8ea818c81898924d2"} Dec 11 08:33:24 crc kubenswrapper[4881]: I1211 08:33:24.473957 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="663d761e9376b71227a4b165b60043bccfae2a1e017f74d8ea818c81898924d2" Dec 11 08:33:24 crc kubenswrapper[4881]: I1211 08:33:24.473825 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5" Dec 11 08:33:29 crc kubenswrapper[4881]: I1211 08:33:29.396960 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:33:29 crc kubenswrapper[4881]: I1211 08:33:29.397518 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:33:29 crc kubenswrapper[4881]: I1211 08:33:29.397563 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:33:29 crc kubenswrapper[4881]: I1211 08:33:29.398309 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"46ea07e17db7258f87fdf87e503b03bd1b18ffe826127eac80cec982af4ad0c0"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 08:33:29 crc kubenswrapper[4881]: I1211 08:33:29.398412 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://46ea07e17db7258f87fdf87e503b03bd1b18ffe826127eac80cec982af4ad0c0" gracePeriod=600 Dec 11 08:33:31 crc kubenswrapper[4881]: I1211 08:33:31.735373 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-768c6dc6d6-plgdc"] Dec 11 08:33:31 crc kubenswrapper[4881]: E1211 08:33:31.736114 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63d42923-a2ed-4b92-82ab-e9ca4ad98e55" 
containerName="util" Dec 11 08:33:31 crc kubenswrapper[4881]: I1211 08:33:31.736131 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="63d42923-a2ed-4b92-82ab-e9ca4ad98e55" containerName="util" Dec 11 08:33:31 crc kubenswrapper[4881]: E1211 08:33:31.736169 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63d42923-a2ed-4b92-82ab-e9ca4ad98e55" containerName="extract" Dec 11 08:33:31 crc kubenswrapper[4881]: I1211 08:33:31.736178 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="63d42923-a2ed-4b92-82ab-e9ca4ad98e55" containerName="extract" Dec 11 08:33:31 crc kubenswrapper[4881]: E1211 08:33:31.736196 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63d42923-a2ed-4b92-82ab-e9ca4ad98e55" containerName="pull" Dec 11 08:33:31 crc kubenswrapper[4881]: I1211 08:33:31.736204 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="63d42923-a2ed-4b92-82ab-e9ca4ad98e55" containerName="pull" Dec 11 08:33:31 crc kubenswrapper[4881]: I1211 08:33:31.736415 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="63d42923-a2ed-4b92-82ab-e9ca4ad98e55" containerName="extract" Dec 11 08:33:31 crc kubenswrapper[4881]: I1211 08:33:31.737729 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-768c6dc6d6-plgdc" Dec 11 08:33:31 crc kubenswrapper[4881]: I1211 08:33:31.739917 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-bnw8f" Dec 11 08:33:31 crc kubenswrapper[4881]: I1211 08:33:31.770497 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-768c6dc6d6-plgdc"] Dec 11 08:33:31 crc kubenswrapper[4881]: I1211 08:33:31.913460 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bptjf\" (UniqueName: \"kubernetes.io/projected/36714c9d-bfd8-4c2e-9d06-971da594217f-kube-api-access-bptjf\") pod \"openstack-operator-controller-operator-768c6dc6d6-plgdc\" (UID: \"36714c9d-bfd8-4c2e-9d06-971da594217f\") " pod="openstack-operators/openstack-operator-controller-operator-768c6dc6d6-plgdc" Dec 11 08:33:32 crc kubenswrapper[4881]: I1211 08:33:32.015111 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bptjf\" (UniqueName: \"kubernetes.io/projected/36714c9d-bfd8-4c2e-9d06-971da594217f-kube-api-access-bptjf\") pod \"openstack-operator-controller-operator-768c6dc6d6-plgdc\" (UID: \"36714c9d-bfd8-4c2e-9d06-971da594217f\") " pod="openstack-operators/openstack-operator-controller-operator-768c6dc6d6-plgdc" Dec 11 08:33:32 crc kubenswrapper[4881]: I1211 08:33:32.035258 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bptjf\" (UniqueName: \"kubernetes.io/projected/36714c9d-bfd8-4c2e-9d06-971da594217f-kube-api-access-bptjf\") pod \"openstack-operator-controller-operator-768c6dc6d6-plgdc\" (UID: \"36714c9d-bfd8-4c2e-9d06-971da594217f\") " pod="openstack-operators/openstack-operator-controller-operator-768c6dc6d6-plgdc" Dec 11 08:33:32 crc kubenswrapper[4881]: I1211 08:33:32.055363 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-768c6dc6d6-plgdc" Dec 11 08:33:32 crc kubenswrapper[4881]: I1211 08:33:32.536366 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-768c6dc6d6-plgdc"] Dec 11 08:33:32 crc kubenswrapper[4881]: I1211 08:33:32.556852 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="46ea07e17db7258f87fdf87e503b03bd1b18ffe826127eac80cec982af4ad0c0" exitCode=0 Dec 11 08:33:32 crc kubenswrapper[4881]: I1211 08:33:32.556899 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"46ea07e17db7258f87fdf87e503b03bd1b18ffe826127eac80cec982af4ad0c0"} Dec 11 08:33:32 crc kubenswrapper[4881]: I1211 08:33:32.556926 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"bf6d1efeca37e2539778b6f34be1560f88c12ad867f27057f394c09165f250e9"} Dec 11 08:33:32 crc kubenswrapper[4881]: I1211 08:33:32.556943 4881 scope.go:117] "RemoveContainer" containerID="77814b1ae10c46f437c76c46bb8c5b9eb3fd105add11c725ab5d0afd4052b630" Dec 11 08:33:33 crc kubenswrapper[4881]: I1211 08:33:33.564823 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-768c6dc6d6-plgdc" event={"ID":"36714c9d-bfd8-4c2e-9d06-971da594217f","Type":"ContainerStarted","Data":"d02d27bb905a1d48769fa6bb21105fee80b0f8a2f17d0f7703e0dfc36b23ab3d"} Dec 11 08:33:36 crc kubenswrapper[4881]: I1211 08:33:36.589111 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-768c6dc6d6-plgdc" event={"ID":"36714c9d-bfd8-4c2e-9d06-971da594217f","Type":"ContainerStarted","Data":"e5104b70bc7c4dc2bedcbdd05fab9eebc83bd71bc6b6276a90f7317edffe227d"} Dec 11 08:33:40 crc kubenswrapper[4881]: I1211 08:33:40.634270 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-768c6dc6d6-plgdc" event={"ID":"36714c9d-bfd8-4c2e-9d06-971da594217f","Type":"ContainerStarted","Data":"2f3be0929b0505a65ef5ed8a949636d6bb9da06a2304959289b48006e18f3607"} Dec 11 08:33:40 crc kubenswrapper[4881]: I1211 08:33:40.636546 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-768c6dc6d6-plgdc" Dec 11 08:33:40 crc kubenswrapper[4881]: I1211 08:33:40.705564 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-768c6dc6d6-plgdc" podStartSLOduration=2.745780404 podStartE2EDuration="9.705529604s" podCreationTimestamp="2025-12-11 08:33:31 +0000 UTC" firstStartedPulling="2025-12-11 08:33:32.547686495 +0000 UTC m=+1060.925055192" lastFinishedPulling="2025-12-11 08:33:39.507435695 +0000 UTC m=+1067.884804392" observedRunningTime="2025-12-11 08:33:40.68271706 +0000 UTC m=+1069.060085767" watchObservedRunningTime="2025-12-11 08:33:40.705529604 +0000 UTC m=+1069.082898311" Dec 11 08:33:41 crc kubenswrapper[4881]: I1211 08:33:41.758104 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/openstack-operator-controller-operator-768c6dc6d6-plgdc" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.693065 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748967c98-lzm95"] Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.695218 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-748967c98-lzm95" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.697404 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-97cwl" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.704249 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5bfbbb859d-d4dsr"] Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.705639 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-d4dsr" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.708612 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvf7h\" (UniqueName: \"kubernetes.io/projected/80435f25-efd8-482d-9a9b-1c6caafd655e-kube-api-access-xvf7h\") pod \"cinder-operator-controller-manager-748967c98-lzm95\" (UID: \"80435f25-efd8-482d-9a9b-1c6caafd655e\") " pod="openstack-operators/cinder-operator-controller-manager-748967c98-lzm95" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.710982 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-74xd9" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.717752 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748967c98-lzm95"] Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.726815 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-6788cc6d75-6gtkq"] Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.728241 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-6gtkq" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.746170 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-qpt62" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.782453 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-85fbd69fcd-twbpm"] Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.783771 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-twbpm" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.786036 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-crv9c" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.792923 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6788cc6d75-6gtkq"] Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.811395 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qs5mk\" (UniqueName: \"kubernetes.io/projected/e07ec193-8583-4299-9370-ce788e2e1ae1-kube-api-access-qs5mk\") pod \"designate-operator-controller-manager-6788cc6d75-6gtkq\" (UID: \"e07ec193-8583-4299-9370-ce788e2e1ae1\") " pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-6gtkq" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.811482 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-256jp\" (UniqueName: \"kubernetes.io/projected/e5f3ecaa-a91f-4dc8-9baf-2866cf8df0f4-kube-api-access-256jp\") pod \"barbican-operator-controller-manager-5bfbbb859d-d4dsr\" (UID: \"e5f3ecaa-a91f-4dc8-9baf-2866cf8df0f4\") " pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-d4dsr" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.811561 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvf7h\" (UniqueName: \"kubernetes.io/projected/80435f25-efd8-482d-9a9b-1c6caafd655e-kube-api-access-xvf7h\") pod \"cinder-operator-controller-manager-748967c98-lzm95\" (UID: \"80435f25-efd8-482d-9a9b-1c6caafd655e\") " pod="openstack-operators/cinder-operator-controller-manager-748967c98-lzm95" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.821641 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5bfbbb859d-d4dsr"] Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.829753 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-85fbd69fcd-twbpm"] Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.847075 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvf7h\" (UniqueName: \"kubernetes.io/projected/80435f25-efd8-482d-9a9b-1c6caafd655e-kube-api-access-xvf7h\") pod \"cinder-operator-controller-manager-748967c98-lzm95\" (UID: \"80435f25-efd8-482d-9a9b-1c6caafd655e\") " pod="openstack-operators/cinder-operator-controller-manager-748967c98-lzm95" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.872283 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-698d6fd7d6-rs5fd"] Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.914619 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp6cs\" (UniqueName: \"kubernetes.io/projected/d46284f5-997e-4ce9-a607-254c3ce33f31-kube-api-access-rp6cs\") pod \"glance-operator-controller-manager-85fbd69fcd-twbpm\" (UID: \"d46284f5-997e-4ce9-a607-254c3ce33f31\") " pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-twbpm" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.914892 4881 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-qs5mk\" (UniqueName: \"kubernetes.io/projected/e07ec193-8583-4299-9370-ce788e2e1ae1-kube-api-access-qs5mk\") pod \"designate-operator-controller-manager-6788cc6d75-6gtkq\" (UID: \"e07ec193-8583-4299-9370-ce788e2e1ae1\") " pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-6gtkq" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.914957 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-256jp\" (UniqueName: \"kubernetes.io/projected/e5f3ecaa-a91f-4dc8-9baf-2866cf8df0f4-kube-api-access-256jp\") pod \"barbican-operator-controller-manager-5bfbbb859d-d4dsr\" (UID: \"e5f3ecaa-a91f-4dc8-9baf-2866cf8df0f4\") " pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-d4dsr" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.921679 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-dqhqx"] Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.922050 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-rs5fd" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.945272 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-dqhqx" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.946045 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-xnmvk" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.947455 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-85q49" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.951366 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-698d6fd7d6-rs5fd"] Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.951891 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-256jp\" (UniqueName: \"kubernetes.io/projected/e5f3ecaa-a91f-4dc8-9baf-2866cf8df0f4-kube-api-access-256jp\") pod \"barbican-operator-controller-manager-5bfbbb859d-d4dsr\" (UID: \"e5f3ecaa-a91f-4dc8-9baf-2866cf8df0f4\") " pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-d4dsr" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.965948 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qs5mk\" (UniqueName: \"kubernetes.io/projected/e07ec193-8583-4299-9370-ce788e2e1ae1-kube-api-access-qs5mk\") pod \"designate-operator-controller-manager-6788cc6d75-6gtkq\" (UID: \"e07ec193-8583-4299-9370-ce788e2e1ae1\") " pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-6gtkq" Dec 11 08:33:59 crc kubenswrapper[4881]: I1211 08:33:59.984245 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-dqhqx"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.016583 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-6c55d8d69b-44khn"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.016947 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp6cs\" (UniqueName: 
\"kubernetes.io/projected/d46284f5-997e-4ce9-a607-254c3ce33f31-kube-api-access-rp6cs\") pod \"glance-operator-controller-manager-85fbd69fcd-twbpm\" (UID: \"d46284f5-997e-4ce9-a607-254c3ce33f31\") " pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-twbpm" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.018203 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-44khn" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.019872 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.021938 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-qlxrd" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.022249 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-748967c98-lzm95" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.035694 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-d4dsr" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.049978 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-6gtkq" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.051192 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-54485f899-mvlsx"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.053857 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-54485f899-mvlsx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.056832 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-g62dj" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.057283 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp6cs\" (UniqueName: \"kubernetes.io/projected/d46284f5-997e-4ce9-a607-254c3ce33f31-kube-api-access-rp6cs\") pod \"glance-operator-controller-manager-85fbd69fcd-twbpm\" (UID: \"d46284f5-997e-4ce9-a607-254c3ce33f31\") " pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-twbpm" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.066463 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6c55d8d69b-44khn"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.073719 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-79cc9d59f5-qh8s8"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.075126 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-qh8s8" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.079378 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-54485f899-mvlsx"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.091585 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-q9wdm" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.092176 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-79cc9d59f5-qh8s8"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.096152 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5cbc8c7f96-ww55g"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.097923 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-ww55g" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.100168 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-2jg7w" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.106844 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-twbpm" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.118428 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdkx2\" (UniqueName: \"kubernetes.io/projected/d4850972-2a52-4030-9822-af3de9cc647a-kube-api-access-sdkx2\") pod \"heat-operator-controller-manager-698d6fd7d6-rs5fd\" (UID: \"d4850972-2a52-4030-9822-af3de9cc647a\") " pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-rs5fd" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.118531 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rfpt\" (UniqueName: \"kubernetes.io/projected/2dab1d4f-2c9a-4b32-a666-4b0802e51576-kube-api-access-8rfpt\") pod \"horizon-operator-controller-manager-7d5d9fd47f-dqhqx\" (UID: \"2dab1d4f-2c9a-4b32-a666-4b0802e51576\") " pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-dqhqx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.131228 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5cbc8c7f96-ww55g"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.141317 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qhz8n"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.152709 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qhz8n" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.160708 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-9nxq6" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.200378 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-58879495c-kmpzx"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.209567 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-58879495c-kmpzx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.212434 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qhz8n"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.213588 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-zwbcb" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.220574 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/867bc48e-c043-4428-b201-0ce4dd830f3f-cert\") pod \"infra-operator-controller-manager-6c55d8d69b-44khn\" (UID: \"867bc48e-c043-4428-b201-0ce4dd830f3f\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-44khn" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.220636 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdkx2\" (UniqueName: \"kubernetes.io/projected/d4850972-2a52-4030-9822-af3de9cc647a-kube-api-access-sdkx2\") pod \"heat-operator-controller-manager-698d6fd7d6-rs5fd\" (UID: \"d4850972-2a52-4030-9822-af3de9cc647a\") " pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-rs5fd" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.220675 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqhhm\" (UniqueName: \"kubernetes.io/projected/867bc48e-c043-4428-b201-0ce4dd830f3f-kube-api-access-cqhhm\") pod \"infra-operator-controller-manager-6c55d8d69b-44khn\" (UID: \"867bc48e-c043-4428-b201-0ce4dd830f3f\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-44khn" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.220709 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rfpt\" (UniqueName: \"kubernetes.io/projected/2dab1d4f-2c9a-4b32-a666-4b0802e51576-kube-api-access-8rfpt\") pod \"horizon-operator-controller-manager-7d5d9fd47f-dqhqx\" (UID: \"2dab1d4f-2c9a-4b32-a666-4b0802e51576\") " pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-dqhqx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.220760 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jprsm\" (UniqueName: \"kubernetes.io/projected/2fd323b1-8fa8-456c-bcd8-d89872682762-kube-api-access-jprsm\") pod \"ironic-operator-controller-manager-54485f899-mvlsx\" (UID: \"2fd323b1-8fa8-456c-bcd8-d89872682762\") " pod="openstack-operators/ironic-operator-controller-manager-54485f899-mvlsx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.220777 4881 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6h9ll\" (UniqueName: \"kubernetes.io/projected/da8fe0e3-3416-453d-80b7-47d4ab23c610-kube-api-access-6h9ll\") pod \"manila-operator-controller-manager-5cbc8c7f96-ww55g\" (UID: \"da8fe0e3-3416-453d-80b7-47d4ab23c610\") " pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-ww55g" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.220807 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvkxx\" (UniqueName: \"kubernetes.io/projected/9b3ee431-6c33-4b49-8fdb-27056597fbe8-kube-api-access-zvkxx\") pod \"keystone-operator-controller-manager-79cc9d59f5-qh8s8\" (UID: \"9b3ee431-6c33-4b49-8fdb-27056597fbe8\") " pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-qh8s8" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.225712 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-58879495c-kmpzx"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.235925 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.240886 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.244254 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-mgng2" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.245204 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rfpt\" (UniqueName: \"kubernetes.io/projected/2dab1d4f-2c9a-4b32-a666-4b0802e51576-kube-api-access-8rfpt\") pod \"horizon-operator-controller-manager-7d5d9fd47f-dqhqx\" (UID: \"2dab1d4f-2c9a-4b32-a666-4b0802e51576\") " pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-dqhqx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.262148 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdkx2\" (UniqueName: \"kubernetes.io/projected/d4850972-2a52-4030-9822-af3de9cc647a-kube-api-access-sdkx2\") pod \"heat-operator-controller-manager-698d6fd7d6-rs5fd\" (UID: \"d4850972-2a52-4030-9822-af3de9cc647a\") " pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-rs5fd" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.266923 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-d5fb87cb8-q22gz"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.268232 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-q22gz" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.283519 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.283847 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-tl4gw" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.324317 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdz5s\" (UniqueName: \"kubernetes.io/projected/21cbbd1f-7cfe-481a-b02a-f72c9d052519-kube-api-access-vdz5s\") pod \"mariadb-operator-controller-manager-64d7c556cd-qhz8n\" (UID: \"21cbbd1f-7cfe-481a-b02a-f72c9d052519\") " pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qhz8n" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.324410 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/867bc48e-c043-4428-b201-0ce4dd830f3f-cert\") pod \"infra-operator-controller-manager-6c55d8d69b-44khn\" (UID: \"867bc48e-c043-4428-b201-0ce4dd830f3f\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-44khn" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.324449 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqhhm\" (UniqueName: \"kubernetes.io/projected/867bc48e-c043-4428-b201-0ce4dd830f3f-kube-api-access-cqhhm\") pod \"infra-operator-controller-manager-6c55d8d69b-44khn\" (UID: \"867bc48e-c043-4428-b201-0ce4dd830f3f\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-44khn" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.324498 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jprsm\" (UniqueName: \"kubernetes.io/projected/2fd323b1-8fa8-456c-bcd8-d89872682762-kube-api-access-jprsm\") pod \"ironic-operator-controller-manager-54485f899-mvlsx\" (UID: \"2fd323b1-8fa8-456c-bcd8-d89872682762\") " pod="openstack-operators/ironic-operator-controller-manager-54485f899-mvlsx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.324517 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6h9ll\" (UniqueName: \"kubernetes.io/projected/da8fe0e3-3416-453d-80b7-47d4ab23c610-kube-api-access-6h9ll\") pod \"manila-operator-controller-manager-5cbc8c7f96-ww55g\" (UID: \"da8fe0e3-3416-453d-80b7-47d4ab23c610\") " pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-ww55g" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.324549 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvkxx\" (UniqueName: \"kubernetes.io/projected/9b3ee431-6c33-4b49-8fdb-27056597fbe8-kube-api-access-zvkxx\") pod \"keystone-operator-controller-manager-79cc9d59f5-qh8s8\" (UID: \"9b3ee431-6c33-4b49-8fdb-27056597fbe8\") " pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-qh8s8" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.324605 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phfkx\" (UniqueName: \"kubernetes.io/projected/5079b14d-bd2f-4151-898d-91362a4b24c2-kube-api-access-phfkx\") pod 
\"neutron-operator-controller-manager-58879495c-kmpzx\" (UID: \"5079b14d-bd2f-4151-898d-91362a4b24c2\") " pod="openstack-operators/neutron-operator-controller-manager-58879495c-kmpzx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.325882 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-d5fb87cb8-q22gz"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.329497 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-rs5fd" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.336161 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.337560 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.341721 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-lgl8h" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.343366 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-nnlz4"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.345222 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/867bc48e-c043-4428-b201-0ce4dd830f3f-cert\") pod \"infra-operator-controller-manager-6c55d8d69b-44khn\" (UID: \"867bc48e-c043-4428-b201-0ce4dd830f3f\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-44khn" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.353824 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-nnlz4" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.356601 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-s7qdq" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.358430 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.365764 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqhhm\" (UniqueName: \"kubernetes.io/projected/867bc48e-c043-4428-b201-0ce4dd830f3f-kube-api-access-cqhhm\") pod \"infra-operator-controller-manager-6c55d8d69b-44khn\" (UID: \"867bc48e-c043-4428-b201-0ce4dd830f3f\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-44khn" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.369771 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvkxx\" (UniqueName: \"kubernetes.io/projected/9b3ee431-6c33-4b49-8fdb-27056597fbe8-kube-api-access-zvkxx\") pod \"keystone-operator-controller-manager-79cc9d59f5-qh8s8\" (UID: \"9b3ee431-6c33-4b49-8fdb-27056597fbe8\") " pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-qh8s8" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.370766 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-867d87977b-ftqqr"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.372522 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-867d87977b-ftqqr" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.381718 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6h9ll\" (UniqueName: \"kubernetes.io/projected/da8fe0e3-3416-453d-80b7-47d4ab23c610-kube-api-access-6h9ll\") pod \"manila-operator-controller-manager-5cbc8c7f96-ww55g\" (UID: \"da8fe0e3-3416-453d-80b7-47d4ab23c610\") " pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-ww55g" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.381880 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-npkgw" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.382192 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jprsm\" (UniqueName: \"kubernetes.io/projected/2fd323b1-8fa8-456c-bcd8-d89872682762-kube-api-access-jprsm\") pod \"ironic-operator-controller-manager-54485f899-mvlsx\" (UID: \"2fd323b1-8fa8-456c-bcd8-d89872682762\") " pod="openstack-operators/ironic-operator-controller-manager-54485f899-mvlsx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.386802 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-8f6687c44-zkl8b"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.391671 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zkl8b" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.395560 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-g8qtq" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.398606 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-nnlz4"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.418068 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-8f6687c44-zkl8b"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.422810 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-dqhqx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.435352 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.439346 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rznp\" (UniqueName: \"kubernetes.io/projected/ac8a74d8-c81e-4154-b2dc-7ebb23d13aa7-kube-api-access-8rznp\") pod \"octavia-operator-controller-manager-d5fb87cb8-q22gz\" (UID: \"ac8a74d8-c81e-4154-b2dc-7ebb23d13aa7\") " pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-q22gz" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.439413 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89gkl\" (UniqueName: \"kubernetes.io/projected/05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1-kube-api-access-89gkl\") pod \"nova-operator-controller-manager-79d658b66d-2dxhd\" (UID: \"05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1\") " pod="openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.439832 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-44khn" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.440395 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hn4qd\" (UniqueName: \"kubernetes.io/projected/2621fa0b-89fb-4d65-aef3-98de0e9a8106-kube-api-access-hn4qd\") pod \"openstack-baremetal-operator-controller-manager-77868f484-kgwhp\" (UID: \"2621fa0b-89fb-4d65-aef3-98de0e9a8106\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.440430 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2621fa0b-89fb-4d65-aef3-98de0e9a8106-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-kgwhp\" (UID: \"2621fa0b-89fb-4d65-aef3-98de0e9a8106\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.440579 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phfkx\" (UniqueName: \"kubernetes.io/projected/5079b14d-bd2f-4151-898d-91362a4b24c2-kube-api-access-phfkx\") pod \"neutron-operator-controller-manager-58879495c-kmpzx\" (UID: \"5079b14d-bd2f-4151-898d-91362a4b24c2\") " pod="openstack-operators/neutron-operator-controller-manager-58879495c-kmpzx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.440630 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdz5s\" (UniqueName: \"kubernetes.io/projected/21cbbd1f-7cfe-481a-b02a-f72c9d052519-kube-api-access-vdz5s\") pod \"mariadb-operator-controller-manager-64d7c556cd-qhz8n\" (UID: \"21cbbd1f-7cfe-481a-b02a-f72c9d052519\") " pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qhz8n" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.461283 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-54485f899-mvlsx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.480087 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phfkx\" (UniqueName: \"kubernetes.io/projected/5079b14d-bd2f-4151-898d-91362a4b24c2-kube-api-access-phfkx\") pod \"neutron-operator-controller-manager-58879495c-kmpzx\" (UID: \"5079b14d-bd2f-4151-898d-91362a4b24c2\") " pod="openstack-operators/neutron-operator-controller-manager-58879495c-kmpzx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.480232 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-fb56f4744-vgmrx"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.482151 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-qh8s8" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.497557 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-ww55g" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.539912 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-fb56f4744-vgmrx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.542436 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rznp\" (UniqueName: \"kubernetes.io/projected/ac8a74d8-c81e-4154-b2dc-7ebb23d13aa7-kube-api-access-8rznp\") pod \"octavia-operator-controller-manager-d5fb87cb8-q22gz\" (UID: \"ac8a74d8-c81e-4154-b2dc-7ebb23d13aa7\") " pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-q22gz" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.542485 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89gkl\" (UniqueName: \"kubernetes.io/projected/05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1-kube-api-access-89gkl\") pod \"nova-operator-controller-manager-79d658b66d-2dxhd\" (UID: \"05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1\") " pod="openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.542531 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jf4mm\" (UniqueName: \"kubernetes.io/projected/91ca18da-4852-496b-bf77-558e8010aabe-kube-api-access-jf4mm\") pod \"ovn-operator-controller-manager-5b67cfc8fb-nnlz4\" (UID: \"91ca18da-4852-496b-bf77-558e8010aabe\") " pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-nnlz4" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.542559 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hn4qd\" (UniqueName: \"kubernetes.io/projected/2621fa0b-89fb-4d65-aef3-98de0e9a8106-kube-api-access-hn4qd\") pod \"openstack-baremetal-operator-controller-manager-77868f484-kgwhp\" (UID: \"2621fa0b-89fb-4d65-aef3-98de0e9a8106\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.542581 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lb8f4\" (UniqueName: \"kubernetes.io/projected/246f8ac7-b65e-40b1-aba1-ba1defde43ef-kube-api-access-lb8f4\") pod \"swift-operator-controller-manager-8f6687c44-zkl8b\" (UID: \"246f8ac7-b65e-40b1-aba1-ba1defde43ef\") " pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zkl8b" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.542610 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2621fa0b-89fb-4d65-aef3-98de0e9a8106-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-kgwhp\" (UID: \"2621fa0b-89fb-4d65-aef3-98de0e9a8106\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.542660 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfgnb\" (UniqueName: \"kubernetes.io/projected/02aa2201-6757-40f8-b24d-fbad39b79069-kube-api-access-cfgnb\") pod \"placement-operator-controller-manager-867d87977b-ftqqr\" (UID: \"02aa2201-6757-40f8-b24d-fbad39b79069\") " pod="openstack-operators/placement-operator-controller-manager-867d87977b-ftqqr" Dec 11 08:34:00 crc kubenswrapper[4881]: E1211 08:34:00.551149 4881 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: 
secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 11 08:34:00 crc kubenswrapper[4881]: E1211 08:34:00.551263 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2621fa0b-89fb-4d65-aef3-98de0e9a8106-cert podName:2621fa0b-89fb-4d65-aef3-98de0e9a8106 nodeName:}" failed. No retries permitted until 2025-12-11 08:34:01.051234038 +0000 UTC m=+1089.428602735 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/2621fa0b-89fb-4d65-aef3-98de0e9a8106-cert") pod "openstack-baremetal-operator-controller-manager-77868f484-kgwhp" (UID: "2621fa0b-89fb-4d65-aef3-98de0e9a8106") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.551926 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-58879495c-kmpzx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.552666 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdz5s\" (UniqueName: \"kubernetes.io/projected/21cbbd1f-7cfe-481a-b02a-f72c9d052519-kube-api-access-vdz5s\") pod \"mariadb-operator-controller-manager-64d7c556cd-qhz8n\" (UID: \"21cbbd1f-7cfe-481a-b02a-f72c9d052519\") " pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qhz8n" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.556674 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-867d87977b-ftqqr"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.575912 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-2bdwz" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.592742 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hn4qd\" (UniqueName: \"kubernetes.io/projected/2621fa0b-89fb-4d65-aef3-98de0e9a8106-kube-api-access-hn4qd\") pod \"openstack-baremetal-operator-controller-manager-77868f484-kgwhp\" (UID: \"2621fa0b-89fb-4d65-aef3-98de0e9a8106\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.594422 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rznp\" (UniqueName: \"kubernetes.io/projected/ac8a74d8-c81e-4154-b2dc-7ebb23d13aa7-kube-api-access-8rznp\") pod \"octavia-operator-controller-manager-d5fb87cb8-q22gz\" (UID: \"ac8a74d8-c81e-4154-b2dc-7ebb23d13aa7\") " pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-q22gz" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.620863 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89gkl\" (UniqueName: \"kubernetes.io/projected/05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1-kube-api-access-89gkl\") pod \"nova-operator-controller-manager-79d658b66d-2dxhd\" (UID: \"05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1\") " pod="openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.628441 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-fb56f4744-vgmrx"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.630458 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-q22gz" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.648428 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lb8f4\" (UniqueName: \"kubernetes.io/projected/246f8ac7-b65e-40b1-aba1-ba1defde43ef-kube-api-access-lb8f4\") pod \"swift-operator-controller-manager-8f6687c44-zkl8b\" (UID: \"246f8ac7-b65e-40b1-aba1-ba1defde43ef\") " pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zkl8b" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.648631 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfgnb\" (UniqueName: \"kubernetes.io/projected/02aa2201-6757-40f8-b24d-fbad39b79069-kube-api-access-cfgnb\") pod \"placement-operator-controller-manager-867d87977b-ftqqr\" (UID: \"02aa2201-6757-40f8-b24d-fbad39b79069\") " pod="openstack-operators/placement-operator-controller-manager-867d87977b-ftqqr" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.648800 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bgf9\" (UniqueName: \"kubernetes.io/projected/de3e8077-0bfa-4e55-aba0-0e5dca0e598d-kube-api-access-9bgf9\") pod \"telemetry-operator-controller-manager-fb56f4744-vgmrx\" (UID: \"de3e8077-0bfa-4e55-aba0-0e5dca0e598d\") " pod="openstack-operators/telemetry-operator-controller-manager-fb56f4744-vgmrx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.648831 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jf4mm\" (UniqueName: \"kubernetes.io/projected/91ca18da-4852-496b-bf77-558e8010aabe-kube-api-access-jf4mm\") pod \"ovn-operator-controller-manager-5b67cfc8fb-nnlz4\" (UID: \"91ca18da-4852-496b-bf77-558e8010aabe\") " pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-nnlz4" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.675406 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lb8f4\" (UniqueName: \"kubernetes.io/projected/246f8ac7-b65e-40b1-aba1-ba1defde43ef-kube-api-access-lb8f4\") pod \"swift-operator-controller-manager-8f6687c44-zkl8b\" (UID: \"246f8ac7-b65e-40b1-aba1-ba1defde43ef\") " pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zkl8b" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.676846 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jf4mm\" (UniqueName: \"kubernetes.io/projected/91ca18da-4852-496b-bf77-558e8010aabe-kube-api-access-jf4mm\") pod \"ovn-operator-controller-manager-5b67cfc8fb-nnlz4\" (UID: \"91ca18da-4852-496b-bf77-558e8010aabe\") " pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-nnlz4" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.680233 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfgnb\" (UniqueName: \"kubernetes.io/projected/02aa2201-6757-40f8-b24d-fbad39b79069-kube-api-access-cfgnb\") pod \"placement-operator-controller-manager-867d87977b-ftqqr\" (UID: \"02aa2201-6757-40f8-b24d-fbad39b79069\") " pod="openstack-operators/placement-operator-controller-manager-867d87977b-ftqqr" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.690514 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-bb86466d8-6mz9j"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 
08:34:00.692846 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-bb86466d8-6mz9j" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.699149 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-hrb2c" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.700055 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-nnlz4" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.714249 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-bb86466d8-6mz9j"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.732815 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-867d87977b-ftqqr" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.749864 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zkl8b" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.750434 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bgf9\" (UniqueName: \"kubernetes.io/projected/de3e8077-0bfa-4e55-aba0-0e5dca0e598d-kube-api-access-9bgf9\") pod \"telemetry-operator-controller-manager-fb56f4744-vgmrx\" (UID: \"de3e8077-0bfa-4e55-aba0-0e5dca0e598d\") " pod="openstack-operators/telemetry-operator-controller-manager-fb56f4744-vgmrx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.776224 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bgf9\" (UniqueName: \"kubernetes.io/projected/de3e8077-0bfa-4e55-aba0-0e5dca0e598d-kube-api-access-9bgf9\") pod \"telemetry-operator-controller-manager-fb56f4744-vgmrx\" (UID: \"de3e8077-0bfa-4e55-aba0-0e5dca0e598d\") " pod="openstack-operators/telemetry-operator-controller-manager-fb56f4744-vgmrx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.805176 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b56b8849f-mj8lx"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.807787 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-mj8lx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.811269 4881 util.go:30] "No sandbox for pod can be found. 
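[annotation] The kube-api-access-* mounts throughout these entries are the projected service-account token volumes that the ServiceAccount admission plugin injects into every pod; the kubelet assembles each one from three sources: a bound token, the cluster CA bundle, and the pod namespace via the downward API. A minimal sketch of that volume built with the k8s.io/api Go types follows; the volume-name suffix and the expiration value are assumptions for illustration, since the real object is generated server-side rather than authored by hand.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// kubeAPIAccessVolume sketches the projected volume behind the
// "kube-api-access-*" mounts in the log. Names/values marked below
// are hypothetical; the admission plugin generates the real ones.
func kubeAPIAccessVolume() corev1.Volume {
	expiry := int64(3607) // assumed default bound-token lifetime (~1h)
	return corev1.Volume{
		Name: "kube-api-access-vdz5s", // hypothetical per-pod suffix
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					// 1. A bound service-account token.
					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
						Path:              "token",
						ExpirationSeconds: &expiry,
					}},
					// 2. The cluster CA bundle from kube-root-ca.crt.
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
					}},
					// 3. The pod namespace via the downward API.
					{DownwardAPI: &corev1.DownwardAPIProjection{
						Items: []corev1.DownwardAPIVolumeFile{{
							Path:     "namespace",
							FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"},
						}},
					}},
				},
			},
		},
	}
}

func main() { fmt.Printf("%+v\n", kubeAPIAccessVolume()) }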
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qhz8n" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.812119 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-qkmnt" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.813397 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b56b8849f-mj8lx"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.855017 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlhxc\" (UniqueName: \"kubernetes.io/projected/06cd21f3-b69e-4238-9894-8c4f0f77ee53-kube-api-access-rlhxc\") pod \"test-operator-controller-manager-bb86466d8-6mz9j\" (UID: \"06cd21f3-b69e-4238-9894-8c4f0f77ee53\") " pod="openstack-operators/test-operator-controller-manager-bb86466d8-6mz9j" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.863110 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.878929 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-fb56f4744-vgmrx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.886514 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.888478 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.898056 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.898382 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-tshtd" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.959277 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlhxc\" (UniqueName: \"kubernetes.io/projected/06cd21f3-b69e-4238-9894-8c4f0f77ee53-kube-api-access-rlhxc\") pod \"test-operator-controller-manager-bb86466d8-6mz9j\" (UID: \"06cd21f3-b69e-4238-9894-8c4f0f77ee53\") " pod="openstack-operators/test-operator-controller-manager-bb86466d8-6mz9j" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.964016 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-lzm95" event={"ID":"80435f25-efd8-482d-9a9b-1c6caafd655e","Type":"ContainerStarted","Data":"fb9185dfeea5dc3c2fecf42671b68d49f4bc89a47939de5b3a43e29cfeb62ccd"} Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.965754 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.966206 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7srs\" (UniqueName: \"kubernetes.io/projected/dd07d5b4-cfe3-4580-a859-64558daab601-kube-api-access-x7srs\") pod 
\"watcher-operator-controller-manager-6b56b8849f-mj8lx\" (UID: \"dd07d5b4-cfe3-4580-a859-64558daab601\") " pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-mj8lx" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.983118 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlhxc\" (UniqueName: \"kubernetes.io/projected/06cd21f3-b69e-4238-9894-8c4f0f77ee53-kube-api-access-rlhxc\") pod \"test-operator-controller-manager-bb86466d8-6mz9j\" (UID: \"06cd21f3-b69e-4238-9894-8c4f0f77ee53\") " pod="openstack-operators/test-operator-controller-manager-bb86466d8-6mz9j" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.984133 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-22zs6"] Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.985429 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-22zs6" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.986877 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-p9gv2" Dec 11 08:34:00 crc kubenswrapper[4881]: I1211 08:34:00.991993 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-22zs6"] Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.031169 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-bb86466d8-6mz9j" Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.041733 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748967c98-lzm95"] Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.067382 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/128ea8d0-53b4-410c-8587-165aa960d46c-cert\") pod \"openstack-operator-controller-manager-b95f4d4f8-phlkr\" (UID: \"128ea8d0-53b4-410c-8587-165aa960d46c\") " pod="openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr" Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.067448 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7srs\" (UniqueName: \"kubernetes.io/projected/dd07d5b4-cfe3-4580-a859-64558daab601-kube-api-access-x7srs\") pod \"watcher-operator-controller-manager-6b56b8849f-mj8lx\" (UID: \"dd07d5b4-cfe3-4580-a859-64558daab601\") " pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-mj8lx" Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.067527 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2621fa0b-89fb-4d65-aef3-98de0e9a8106-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-kgwhp\" (UID: \"2621fa0b-89fb-4d65-aef3-98de0e9a8106\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.067600 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6nkh\" (UniqueName: \"kubernetes.io/projected/128ea8d0-53b4-410c-8587-165aa960d46c-kube-api-access-m6nkh\") pod 
\"openstack-operator-controller-manager-b95f4d4f8-phlkr\" (UID: \"128ea8d0-53b4-410c-8587-165aa960d46c\") " pod="openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr" Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.083415 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5bfbbb859d-d4dsr"] Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.096000 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2621fa0b-89fb-4d65-aef3-98de0e9a8106-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-kgwhp\" (UID: \"2621fa0b-89fb-4d65-aef3-98de0e9a8106\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.113985 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7srs\" (UniqueName: \"kubernetes.io/projected/dd07d5b4-cfe3-4580-a859-64558daab601-kube-api-access-x7srs\") pod \"watcher-operator-controller-manager-6b56b8849f-mj8lx\" (UID: \"dd07d5b4-cfe3-4580-a859-64558daab601\") " pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-mj8lx" Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.135552 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-mj8lx" Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.139569 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-85fbd69fcd-twbpm"] Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.170246 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6nkh\" (UniqueName: \"kubernetes.io/projected/128ea8d0-53b4-410c-8587-165aa960d46c-kube-api-access-m6nkh\") pod \"openstack-operator-controller-manager-b95f4d4f8-phlkr\" (UID: \"128ea8d0-53b4-410c-8587-165aa960d46c\") " pod="openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr" Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.170315 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/128ea8d0-53b4-410c-8587-165aa960d46c-cert\") pod \"openstack-operator-controller-manager-b95f4d4f8-phlkr\" (UID: \"128ea8d0-53b4-410c-8587-165aa960d46c\") " pod="openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr" Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.170417 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pwp42\" (UniqueName: \"kubernetes.io/projected/14d65f13-7dce-49b7-9c8e-0a6ea9b57132-kube-api-access-pwp42\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-22zs6\" (UID: \"14d65f13-7dce-49b7-9c8e-0a6ea9b57132\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-22zs6" Dec 11 08:34:01 crc kubenswrapper[4881]: E1211 08:34:01.170874 4881 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 11 08:34:01 crc kubenswrapper[4881]: E1211 08:34:01.170923 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/128ea8d0-53b4-410c-8587-165aa960d46c-cert podName:128ea8d0-53b4-410c-8587-165aa960d46c nodeName:}" failed. 
No retries permitted until 2025-12-11 08:34:01.670906283 +0000 UTC m=+1090.048274970 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/128ea8d0-53b4-410c-8587-165aa960d46c-cert") pod "openstack-operator-controller-manager-b95f4d4f8-phlkr" (UID: "128ea8d0-53b4-410c-8587-165aa960d46c") : secret "webhook-server-cert" not found Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.214525 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6nkh\" (UniqueName: \"kubernetes.io/projected/128ea8d0-53b4-410c-8587-165aa960d46c-kube-api-access-m6nkh\") pod \"openstack-operator-controller-manager-b95f4d4f8-phlkr\" (UID: \"128ea8d0-53b4-410c-8587-165aa960d46c\") " pod="openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr" Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.261721 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.274585 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pwp42\" (UniqueName: \"kubernetes.io/projected/14d65f13-7dce-49b7-9c8e-0a6ea9b57132-kube-api-access-pwp42\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-22zs6\" (UID: \"14d65f13-7dce-49b7-9c8e-0a6ea9b57132\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-22zs6" Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.301682 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pwp42\" (UniqueName: \"kubernetes.io/projected/14d65f13-7dce-49b7-9c8e-0a6ea9b57132-kube-api-access-pwp42\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-22zs6\" (UID: \"14d65f13-7dce-49b7-9c8e-0a6ea9b57132\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-22zs6" Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.478857 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-698d6fd7d6-rs5fd"] Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.508807 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-22zs6" Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.515543 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6788cc6d75-6gtkq"] Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.685220 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/128ea8d0-53b4-410c-8587-165aa960d46c-cert\") pod \"openstack-operator-controller-manager-b95f4d4f8-phlkr\" (UID: \"128ea8d0-53b4-410c-8587-165aa960d46c\") " pod="openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr" Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.690567 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/128ea8d0-53b4-410c-8587-165aa960d46c-cert\") pod \"openstack-operator-controller-manager-b95f4d4f8-phlkr\" (UID: \"128ea8d0-53b4-410c-8587-165aa960d46c\") " pod="openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr" Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.751883 4881 util.go:30] "No sandbox for pod can be found. 
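[annotation] The MountVolume.SetUp failures for the "cert" volume above come from a plain Secret volume whose secret ("webhook-server-cert", created a little later by the operator's certificate tooling) does not exist yet; the kubelet retries with a doubling delay starting at the logged 500ms until the mount succeeds at 08:34:01.690. Below is a minimal Go sketch of that volume plus the retry pattern, assuming illustrative backoff parameters; this is not the kubelet's nestedpendingoperations code.

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/wait"
)

// certVolume is the Secret-backed volume the log keeps retrying
// (names taken verbatim from the entries above).
func certVolume() corev1.Volume {
	return corev1.Volume{
		Name: "cert",
		VolumeSource: corev1.VolumeSource{
			Secret: &corev1.SecretVolumeSource{SecretName: "webhook-server-cert"},
		},
	}
}

func main() {
	secretExistsAfter := 3 // pretend the webhook secret appears on attempt 3
	attempt := 0
	// 500ms initial delay, doubling per attempt, mirroring the logged
	// durationBeforeRetry; Steps/Factor are assumptions for the sketch.
	backoff := wait.Backoff{Duration: 500 * time.Millisecond, Factor: 2.0, Steps: 6}
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempt++
		if attempt < secretExistsAfter {
			fmt.Printf("attempt %d: secret %q not found, retrying\n", attempt, "webhook-server-cert")
			return false, nil // not done: sleep the backoff, then retry
		}
		fmt.Printf("attempt %d: MountVolume.SetUp succeeded for volume %q\n", attempt, certVolume().Name)
		return true, nil
	})
	if err != nil {
		fmt.Println("gave up:", err)
	}
}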
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr" Dec 11 08:34:01 crc kubenswrapper[4881]: W1211 08:34:01.928537 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9b3ee431_6c33_4b49_8fdb_27056597fbe8.slice/crio-90fb07d2bdd085936ad9593ca33f0ee9967080f0045a859692f07499b5c75764 WatchSource:0}: Error finding container 90fb07d2bdd085936ad9593ca33f0ee9967080f0045a859692f07499b5c75764: Status 404 returned error can't find the container with id 90fb07d2bdd085936ad9593ca33f0ee9967080f0045a859692f07499b5c75764 Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.928766 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-79cc9d59f5-qh8s8"] Dec 11 08:34:01 crc kubenswrapper[4881]: I1211 08:34:01.978784 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5cbc8c7f96-ww55g"] Dec 11 08:34:01 crc kubenswrapper[4881]: W1211 08:34:01.981770 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda8fe0e3_3416_453d_80b7_47d4ab23c610.slice/crio-eac43db0cab008744d94895dae82c835aa43b71e0224b0b798441e2f1a27274b WatchSource:0}: Error finding container eac43db0cab008744d94895dae82c835aa43b71e0224b0b798441e2f1a27274b: Status 404 returned error can't find the container with id eac43db0cab008744d94895dae82c835aa43b71e0224b0b798441e2f1a27274b Dec 11 08:34:02 crc kubenswrapper[4881]: I1211 08:34:02.002399 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-qh8s8" event={"ID":"9b3ee431-6c33-4b49-8fdb-27056597fbe8","Type":"ContainerStarted","Data":"90fb07d2bdd085936ad9593ca33f0ee9967080f0045a859692f07499b5c75764"} Dec 11 08:34:02 crc kubenswrapper[4881]: I1211 08:34:02.003766 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-dqhqx"] Dec 11 08:34:02 crc kubenswrapper[4881]: I1211 08:34:02.004262 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-twbpm" event={"ID":"d46284f5-997e-4ce9-a607-254c3ce33f31","Type":"ContainerStarted","Data":"ce6dc881955ff5950bb1325d61be4a14c8f41bfe4b906d2ae5c0ad6546faac6b"} Dec 11 08:34:02 crc kubenswrapper[4881]: I1211 08:34:02.013017 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-6gtkq" event={"ID":"e07ec193-8583-4299-9370-ce788e2e1ae1","Type":"ContainerStarted","Data":"9f6d273f7d4006953870e4cb0bb9359c7fffb0f907e27a31fff0fcd24f42eeac"} Dec 11 08:34:02 crc kubenswrapper[4881]: I1211 08:34:02.015515 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-d4dsr" event={"ID":"e5f3ecaa-a91f-4dc8-9baf-2866cf8df0f4","Type":"ContainerStarted","Data":"5ec7217237b021aac3ec64406639500ea725e085498208cf7beee7092de9a68b"} Dec 11 08:34:02 crc kubenswrapper[4881]: I1211 08:34:02.018560 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6c55d8d69b-44khn"] Dec 11 08:34:02 crc kubenswrapper[4881]: I1211 08:34:02.019167 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-rs5fd" event={"ID":"d4850972-2a52-4030-9822-af3de9cc647a","Type":"ContainerStarted","Data":"b182533f0e7a1ebaaba8f62d826c4b6c069621a26002808fa9e6ef85957f53f3"} Dec 11 08:34:02 crc kubenswrapper[4881]: I1211 08:34:02.224635 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-nnlz4"] Dec 11 08:34:02 crc kubenswrapper[4881]: W1211 08:34:02.226463 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91ca18da_4852_496b_bf77_558e8010aabe.slice/crio-7bcd084ea6c4734b610c8725e4b01ce1b6338863c3460ac16bad81c4bc61af9e WatchSource:0}: Error finding container 7bcd084ea6c4734b610c8725e4b01ce1b6338863c3460ac16bad81c4bc61af9e: Status 404 returned error can't find the container with id 7bcd084ea6c4734b610c8725e4b01ce1b6338863c3460ac16bad81c4bc61af9e Dec 11 08:34:02 crc kubenswrapper[4881]: I1211 08:34:02.231546 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-867d87977b-ftqqr"] Dec 11 08:34:02 crc kubenswrapper[4881]: I1211 08:34:02.238561 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-54485f899-mvlsx"] Dec 11 08:34:02 crc kubenswrapper[4881]: I1211 08:34:02.245172 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-58879495c-kmpzx"] Dec 11 08:34:02 crc kubenswrapper[4881]: W1211 08:34:02.245618 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5079b14d_bd2f_4151_898d_91362a4b24c2.slice/crio-cfd7904a718089824f9bb36a64aaacdb3c2aefaac140274cb6b18722dc909e5b WatchSource:0}: Error finding container cfd7904a718089824f9bb36a64aaacdb3c2aefaac140274cb6b18722dc909e5b: Status 404 returned error can't find the container with id cfd7904a718089824f9bb36a64aaacdb3c2aefaac140274cb6b18722dc909e5b Dec 11 08:34:02 crc kubenswrapper[4881]: I1211 08:34:02.250102 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-d5fb87cb8-q22gz"] Dec 11 08:34:02 crc kubenswrapper[4881]: W1211 08:34:02.256872 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podac8a74d8_c81e_4154_b2dc_7ebb23d13aa7.slice/crio-9040a6f68998d60abc8545973198d8b2ba3fd60a08e6458dc47d219ace870373 WatchSource:0}: Error finding container 9040a6f68998d60abc8545973198d8b2ba3fd60a08e6458dc47d219ace870373: Status 404 returned error can't find the container with id 9040a6f68998d60abc8545973198d8b2ba3fd60a08e6458dc47d219ace870373 Dec 11 08:34:02 crc kubenswrapper[4881]: W1211 08:34:02.260051 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2fd323b1_8fa8_456c_bcd8_d89872682762.slice/crio-0494a777dfcea75eece300d0938d73ba8bef6972d4d8e0c3f5fac65fa8925ce4 WatchSource:0}: Error finding container 0494a777dfcea75eece300d0938d73ba8bef6972d4d8e0c3f5fac65fa8925ce4: Status 404 returned error can't find the container with id 0494a777dfcea75eece300d0938d73ba8bef6972d4d8e0c3f5fac65fa8925ce4 Dec 11 08:34:03 crc kubenswrapper[4881]: I1211 08:34:03.086991 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qhz8n"] Dec 11 08:34:03 crc kubenswrapper[4881]: I1211 08:34:03.087304 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-q22gz" event={"ID":"ac8a74d8-c81e-4154-b2dc-7ebb23d13aa7","Type":"ContainerStarted","Data":"9040a6f68998d60abc8545973198d8b2ba3fd60a08e6458dc47d219ace870373"} Dec 11 08:34:03 crc kubenswrapper[4881]: I1211 08:34:03.087361 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-58879495c-kmpzx" event={"ID":"5079b14d-bd2f-4151-898d-91362a4b24c2","Type":"ContainerStarted","Data":"cfd7904a718089824f9bb36a64aaacdb3c2aefaac140274cb6b18722dc909e5b"} Dec 11 08:34:03 crc kubenswrapper[4881]: I1211 08:34:03.087372 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-ww55g" event={"ID":"da8fe0e3-3416-453d-80b7-47d4ab23c610","Type":"ContainerStarted","Data":"eac43db0cab008744d94895dae82c835aa43b71e0224b0b798441e2f1a27274b"} Dec 11 08:34:03 crc kubenswrapper[4881]: I1211 08:34:03.087384 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-mvlsx" event={"ID":"2fd323b1-8fa8-456c-bcd8-d89872682762","Type":"ContainerStarted","Data":"0494a777dfcea75eece300d0938d73ba8bef6972d4d8e0c3f5fac65fa8925ce4"} Dec 11 08:34:03 crc kubenswrapper[4881]: I1211 08:34:03.087395 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-dqhqx" event={"ID":"2dab1d4f-2c9a-4b32-a666-4b0802e51576","Type":"ContainerStarted","Data":"3362a81133bed57b4135e38113d1b04e8a4efeb50a638b67b35cdf40f6d7b07e"} Dec 11 08:34:03 crc kubenswrapper[4881]: I1211 08:34:03.089996 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-44khn" event={"ID":"867bc48e-c043-4428-b201-0ce4dd830f3f","Type":"ContainerStarted","Data":"df6534f6269270dd1cb561a212187ab307210004e6a32eb09227d68e87079d8b"} Dec 11 08:34:03 crc kubenswrapper[4881]: I1211 08:34:03.092954 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-nnlz4" event={"ID":"91ca18da-4852-496b-bf77-558e8010aabe","Type":"ContainerStarted","Data":"7bcd084ea6c4734b610c8725e4b01ce1b6338863c3460ac16bad81c4bc61af9e"} Dec 11 08:34:03 crc kubenswrapper[4881]: I1211 08:34:03.095382 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-ftqqr" event={"ID":"02aa2201-6757-40f8-b24d-fbad39b79069","Type":"ContainerStarted","Data":"6d6eeab9f8696fb1d1c544a10c64924de34e1d9662689a074b5044ddcb292eee"} Dec 11 08:34:03 crc kubenswrapper[4881]: I1211 08:34:03.101325 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-8f6687c44-zkl8b"] Dec 11 08:34:03 crc kubenswrapper[4881]: I1211 08:34:03.125914 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-fb56f4744-vgmrx"] Dec 11 08:34:03 crc kubenswrapper[4881]: I1211 08:34:03.138473 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-bb86466d8-6mz9j"] Dec 11 08:34:03 crc kubenswrapper[4881]: I1211 08:34:03.154887 4881 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b56b8849f-mj8lx"] Dec 11 08:34:03 crc kubenswrapper[4881]: I1211 08:34:03.164000 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd"] Dec 11 08:34:03 crc kubenswrapper[4881]: I1211 08:34:03.175100 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp"] Dec 11 08:34:03 crc kubenswrapper[4881]: I1211 08:34:03.187875 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-22zs6"] Dec 11 08:34:03 crc kubenswrapper[4881]: I1211 08:34:03.199783 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr"] Dec 11 08:34:06 crc kubenswrapper[4881]: W1211 08:34:06.079427 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06cd21f3_b69e_4238_9894_8c4f0f77ee53.slice/crio-6492d170fc3b461720c4130cbff9568ab10653d91470940dfe860f14f8e2a9fd WatchSource:0}: Error finding container 6492d170fc3b461720c4130cbff9568ab10653d91470940dfe860f14f8e2a9fd: Status 404 returned error can't find the container with id 6492d170fc3b461720c4130cbff9568ab10653d91470940dfe860f14f8e2a9fd Dec 11 08:34:06 crc kubenswrapper[4881]: I1211 08:34:06.126655 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zkl8b" event={"ID":"246f8ac7-b65e-40b1-aba1-ba1defde43ef","Type":"ContainerStarted","Data":"eeec96fbe41390a00c15068abafa9a12fc50d765892a90b03b4cc97f3f83b0ec"} Dec 11 08:34:06 crc kubenswrapper[4881]: I1211 08:34:06.128057 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr" event={"ID":"128ea8d0-53b4-410c-8587-165aa960d46c","Type":"ContainerStarted","Data":"8c3c0002431d5ae1d162255d02dfd6fe7fc4e27c6858686299af217d9d6c4eb0"} Dec 11 08:34:06 crc kubenswrapper[4881]: I1211 08:34:06.129259 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-fb56f4744-vgmrx" event={"ID":"de3e8077-0bfa-4e55-aba0-0e5dca0e598d","Type":"ContainerStarted","Data":"dc299bd8ef8d63633970ce93e0f79041446bca262e130e8209ac4accb14cbf06"} Dec 11 08:34:06 crc kubenswrapper[4881]: I1211 08:34:06.131249 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-bb86466d8-6mz9j" event={"ID":"06cd21f3-b69e-4238-9894-8c4f0f77ee53","Type":"ContainerStarted","Data":"6492d170fc3b461720c4130cbff9568ab10653d91470940dfe860f14f8e2a9fd"} Dec 11 08:34:06 crc kubenswrapper[4881]: W1211 08:34:06.787827 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddd07d5b4_cfe3_4580_a859_64558daab601.slice/crio-99673adbe21a39f994a3b5a0d7bef4cfa5335a03874e1a1d8ceeca49069c7edc WatchSource:0}: Error finding container 99673adbe21a39f994a3b5a0d7bef4cfa5335a03874e1a1d8ceeca49069c7edc: Status 404 returned error can't find the container with id 99673adbe21a39f994a3b5a0d7bef4cfa5335a03874e1a1d8ceeca49069c7edc Dec 11 08:34:06 crc kubenswrapper[4881]: W1211 08:34:06.789790 4881 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05ef8d73_6d8a_4d91_83a3_93ec0fc14ae1.slice/crio-bfceb767fd78d5e86df518408de746ccb894f532e759178988bea9d26df84fd7 WatchSource:0}: Error finding container bfceb767fd78d5e86df518408de746ccb894f532e759178988bea9d26df84fd7: Status 404 returned error can't find the container with id bfceb767fd78d5e86df518408de746ccb894f532e759178988bea9d26df84fd7 Dec 11 08:34:06 crc kubenswrapper[4881]: W1211 08:34:06.798482 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod14d65f13_7dce_49b7_9c8e_0a6ea9b57132.slice/crio-2333517563fca2b2f8586ecbbff5c9955497f06b41aadf3d418d8b02f0b0c055 WatchSource:0}: Error finding container 2333517563fca2b2f8586ecbbff5c9955497f06b41aadf3d418d8b02f0b0c055: Status 404 returned error can't find the container with id 2333517563fca2b2f8586ecbbff5c9955497f06b41aadf3d418d8b02f0b0c055 Dec 11 08:34:06 crc kubenswrapper[4881]: W1211 08:34:06.799854 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2621fa0b_89fb_4d65_aef3_98de0e9a8106.slice/crio-b8e67d91072765f96c809e167346c965ab1f36c1c4bc0adc029b71e9a3045948 WatchSource:0}: Error finding container b8e67d91072765f96c809e167346c965ab1f36c1c4bc0adc029b71e9a3045948: Status 404 returned error can't find the container with id b8e67d91072765f96c809e167346c965ab1f36c1c4bc0adc029b71e9a3045948 Dec 11 08:34:06 crc kubenswrapper[4881]: W1211 08:34:06.808635 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod21cbbd1f_7cfe_481a_b02a_f72c9d052519.slice/crio-4bb42bb0ba9c05232ede1842b3a9325b8caf647d3d6693865968d379b7b99b70 WatchSource:0}: Error finding container 4bb42bb0ba9c05232ede1842b3a9325b8caf647d3d6693865968d379b7b99b70: Status 404 returned error can't find the container with id 4bb42bb0ba9c05232ede1842b3a9325b8caf647d3d6693865968d379b7b99b70 Dec 11 08:34:07 crc kubenswrapper[4881]: I1211 08:34:07.155206 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qhz8n" event={"ID":"21cbbd1f-7cfe-481a-b02a-f72c9d052519","Type":"ContainerStarted","Data":"4bb42bb0ba9c05232ede1842b3a9325b8caf647d3d6693865968d379b7b99b70"} Dec 11 08:34:07 crc kubenswrapper[4881]: I1211 08:34:07.156902 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" event={"ID":"2621fa0b-89fb-4d65-aef3-98de0e9a8106","Type":"ContainerStarted","Data":"b8e67d91072765f96c809e167346c965ab1f36c1c4bc0adc029b71e9a3045948"} Dec 11 08:34:07 crc kubenswrapper[4881]: I1211 08:34:07.158533 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-22zs6" event={"ID":"14d65f13-7dce-49b7-9c8e-0a6ea9b57132","Type":"ContainerStarted","Data":"2333517563fca2b2f8586ecbbff5c9955497f06b41aadf3d418d8b02f0b0c055"} Dec 11 08:34:07 crc kubenswrapper[4881]: I1211 08:34:07.160486 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-mj8lx" event={"ID":"dd07d5b4-cfe3-4580-a859-64558daab601","Type":"ContainerStarted","Data":"99673adbe21a39f994a3b5a0d7bef4cfa5335a03874e1a1d8ceeca49069c7edc"} Dec 11 08:34:07 crc kubenswrapper[4881]: I1211 08:34:07.166289 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
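[annotation] The "SyncLoop (PLEG): ContainerStarted" entries are the kubelet's pod lifecycle event generator relaying container state changes from CRI-O, and the interleaved manager.go:1169 warnings appear to be a benign race in which cadvisor notices a new crio-* cgroup before the container is registered, resolving on the next relist. From outside the node, the same transitions can be observed by watching pod status with client-go, as in this sketch; the kubeconfig path is an assumption.

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig path; adjust for the environment.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// Watch the namespace the log entries above are about.
	w, err := cs.CoreV1().Pods("openstack-operators").Watch(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for ev := range w.ResultChan() {
		pod, ok := ev.Object.(*corev1.Pod)
		if !ok {
			continue
		}
		// Print running containers, roughly the client-side view of the
		// kubelet's ContainerStarted PLEG events.
		for _, st := range pod.Status.ContainerStatuses {
			if st.State.Running != nil {
				fmt.Printf("%s/%s container %s running (id %s)\n",
					pod.Namespace, pod.Name, st.Name, st.ContainerID)
			}
		}
	}
}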
pod="openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd" event={"ID":"05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1","Type":"ContainerStarted","Data":"bfceb767fd78d5e86df518408de746ccb894f532e759178988bea9d26df84fd7"} Dec 11 08:34:15 crc kubenswrapper[4881]: E1211 08:34:15.721207 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:1739eeeb2c05142ddf835739758ffd04ad06cad353125e2ceff687f237ecda57" Dec 11 08:34:15 crc kubenswrapper[4881]: E1211 08:34:15.721918 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:1739eeeb2c05142ddf835739758ffd04ad06cad353125e2ceff687f237ecda57,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sdkx2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-698d6fd7d6-rs5fd_openstack-operators(d4850972-2a52-4030-9822-af3de9cc647a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:34:18 crc kubenswrapper[4881]: E1211 08:34:18.538126 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:f4b6baa2b8a661351cfc24fff5aacee5aa4198106618700cfa47ec3a75f88b31" Dec 11 08:34:18 crc kubenswrapper[4881]: E1211 08:34:18.538787 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:f4b6baa2b8a661351cfc24fff5aacee5aa4198106618700cfa47ec3a75f88b31,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rp6cs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-85fbd69fcd-twbpm_openstack-operators(d46284f5-997e-4ce9-a607-254c3ce33f31): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:34:18 crc kubenswrapper[4881]: E1211 08:34:18.956511 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:2811f492f5663ec8660767dcb699060691c10dd809b1bb5f3a1f6b803946a653" Dec 11 08:34:18 crc kubenswrapper[4881]: E1211 08:34:18.956683 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:2811f492f5663ec8660767dcb699060691c10dd809b1bb5f3a1f6b803946a653,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m 
DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8rfpt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-7d5d9fd47f-dqhqx_openstack-operators(2dab1d4f-2c9a-4b32-a666-4b0802e51576): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:34:26 crc kubenswrapper[4881]: E1211 08:34:26.129583 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:57d9cb0034a7d5c7a39410fcb619ade2010e6855344dc3a0bc2bfd98cdf345d8" Dec 11 08:34:26 crc kubenswrapper[4881]: E1211 08:34:26.130401 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:57d9cb0034a7d5c7a39410fcb619ade2010e6855344dc3a0bc2bfd98cdf345d8,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6h9ll,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 
8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5cbc8c7f96-ww55g_openstack-operators(da8fe0e3-3416-453d-80b7-47d4ab23c610): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:34:26 crc kubenswrapper[4881]: E1211 08:34:26.478835 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:4f799c74da2f1c864af24fcd5efd91ec64848972a95246eac6b5c6c4d71c1756" Dec 11 08:34:26 crc kubenswrapper[4881]: E1211 08:34:26.479043 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:4f799c74da2f1c864af24fcd5efd91ec64848972a95246eac6b5c6c4d71c1756,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zvkxx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-79cc9d59f5-qh8s8_openstack-operators(9b3ee431-6c33-4b49-8fdb-27056597fbe8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:34:28 crc kubenswrapper[4881]: E1211 08:34:28.882423 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:1988aaf9cd245150cda123aaaa21718ccb552c47f1623b7d68804f13c47f2c6a" Dec 11 08:34:28 crc kubenswrapper[4881]: E1211 08:34:28.883080 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:1988aaf9cd245150cda123aaaa21718ccb552c47f1623b7d68804f13c47f2c6a,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-x7srs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
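[annotation] The &Container{...} dumps in these "Unhandled Error" entries all share one probe and resource shape. Rebuilding it with the k8s.io/api types makes the flattened dump easier to read; the values below are transcribed from the log, but this is a reconstruction for readability, not the operators' source.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// managerContainer rebuilds the probe/resource shape shared by the dumped
// operator "manager" containers (values copied from the dumps above).
func managerContainer() corev1.Container {
	return corev1.Container{
		Name:    "manager",
		Command: []string{"/manager"},
		Args: []string{
			"--health-probe-bind-address=:8081",
			"--metrics-bind-address=127.0.0.1:8080",
			"--leader-elect",
		},
		Resources: corev1.ResourceRequirements{
			Limits: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("500m"),
				corev1.ResourceMemory: resource.MustParse("512Mi"), // 536870912 BinarySI in the dump
			},
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("10m"),
				corev1.ResourceMemory: resource.MustParse("256Mi"), // 268435456 BinarySI
			},
		},
		LivenessProbe: &corev1.Probe{
			ProbeHandler:        corev1.ProbeHandler{HTTPGet: &corev1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8081)}},
			InitialDelaySeconds: 15,
			PeriodSeconds:       20,
		},
		ReadinessProbe: &corev1.Probe{
			ProbeHandler:        corev1.ProbeHandler{HTTPGet: &corev1.HTTPGetAction{Path: "/readyz", Port: intstr.FromInt(8081)}},
			InitialDelaySeconds: 5,
			PeriodSeconds:       10,
		},
	}
}

func main() { fmt.Printf("%+v\n", managerContainer().Resources.Limits) }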
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-6b56b8849f-mj8lx_openstack-operators(dd07d5b4-cfe3-4580-a859-64558daab601): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:34:29 crc kubenswrapper[4881]: E1211 08:34:29.367410 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:49180c7bd4f0071e43ae7044260a3a97c4aa34fcbcb2d0d4573df449765ed391" Dec 11 08:34:29 crc kubenswrapper[4881]: E1211 08:34:29.367655 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:49180c7bd4f0071e43ae7044260a3a97c4aa34fcbcb2d0d4573df449765ed391,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rlhxc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-bb86466d8-6mz9j_openstack-operators(06cd21f3-b69e-4238-9894-8c4f0f77ee53): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:34:29 crc kubenswrapper[4881]: E1211 08:34:29.836351 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d" Dec 11 08:34:29 crc kubenswrapper[4881]: E1211 08:34:29.836489 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cfgnb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-867d87977b-ftqqr_openstack-operators(02aa2201-6757-40f8-b24d-fbad39b79069): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:34:30 crc kubenswrapper[4881]: E1211 08:34:30.241053 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:66928f0eae5206f671ac7b21f79953e37009c54187d768dc6e03fe0a3d202b3b" Dec 11 08:34:30 crc kubenswrapper[4881]: E1211 08:34:30.241925 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:66928f0eae5206f671ac7b21f79953e37009c54187d768dc6e03fe0a3d202b3b,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CE
ILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{
Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MA
NILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_I
MAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hn4qd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-77868f484-kgwhp_openstack-operators(2621fa0b-89fb-4d65-aef3-98de0e9a8106): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 11 08:34:30 crc kubenswrapper[4881]: E1211 08:34:30.779177 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:2c4fe20e044dd8ea1f60f2f3f5e3844d932b4b79439835bd8771c73f16b38312"
Dec 11 08:34:30 crc kubenswrapper[4881]: E1211 08:34:30.779606 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:2c4fe20e044dd8ea1f60f2f3f5e3844d932b4b79439835bd8771c73f16b38312,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vdz5s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-64d7c556cd-qhz8n_openstack-operators(21cbbd1f-7cfe-481a-b02a-f72c9d052519): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 11 08:34:31 crc kubenswrapper[4881]: E1211 08:34:31.197578 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:debe5d6d29a007374b270b0e114e69b2136eee61dabab8576baf4010c951edb9"
Dec 11 08:34:31 crc kubenswrapper[4881]: E1211 08:34:31.198174 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:debe5d6d29a007374b270b0e114e69b2136eee61dabab8576baf4010c951edb9,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-89gkl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79d658b66d-2dxhd_openstack-operators(05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 11 08:34:31 crc kubenswrapper[4881]: E1211 08:34:31.641182 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:f076b8d9e85881d9c3cb5272b13db7f5e05d2e9da884c17b677a844112831907"
Dec 11 08:34:31 crc kubenswrapper[4881]: E1211 08:34:31.641422 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:f076b8d9e85881d9c3cb5272b13db7f5e05d2e9da884c17b677a844112831907,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lb8f4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-8f6687c44-zkl8b_openstack-operators(246f8ac7-b65e-40b1-aba1-ba1defde43ef): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 11 08:34:35 crc kubenswrapper[4881]: E1211 08:34:35.533796 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2"
Dec 11 08:34:35 crc kubenswrapper[4881]: E1211 08:34:35.534508 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pwp42,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-5f97d8c699-22zs6_openstack-operators(14d65f13-7dce-49b7-9c8e-0a6ea9b57132): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 11 08:34:35 crc kubenswrapper[4881]: E1211 08:34:35.535708 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-22zs6" podUID="14d65f13-7dce-49b7-9c8e-0a6ea9b57132"
Dec 11 08:34:35 crc kubenswrapper[4881]: E1211 08:34:35.593358 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.9:5001/openstack-k8s-operators/telemetry-operator:30f617403195366c610a5c61c5e3f09a4bb73c0f"
Dec 11 08:34:35 crc kubenswrapper[4881]: E1211 08:34:35.593419 4881 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.9:5001/openstack-k8s-operators/telemetry-operator:30f617403195366c610a5c61c5e3f09a4bb73c0f"
Dec 11 08:34:35 crc kubenswrapper[4881]: E1211 08:34:35.593594 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.9:5001/openstack-k8s-operators/telemetry-operator:30f617403195366c610a5c61c5e3f09a4bb73c0f,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9bgf9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-fb56f4744-vgmrx_openstack-operators(de3e8077-0bfa-4e55-aba0-0e5dca0e598d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 11 08:34:36 crc kubenswrapper[4881]: E1211 08:34:36.375717 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-qh8s8" podUID="9b3ee431-6c33-4b49-8fdb-27056597fbe8"
Dec 11 08:34:36 crc kubenswrapper[4881]: E1211 08:34:36.408683 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-ftqqr" podUID="02aa2201-6757-40f8-b24d-fbad39b79069"
Dec 11 08:34:36 crc kubenswrapper[4881]: E1211 08:34:36.411565 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/test-operator-controller-manager-bb86466d8-6mz9j" podUID="06cd21f3-b69e-4238-9894-8c4f0f77ee53"
Dec 11 08:34:36 crc kubenswrapper[4881]: I1211 08:34:36.429311 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-6gtkq" event={"ID":"e07ec193-8583-4299-9370-ce788e2e1ae1","Type":"ContainerStarted","Data":"fc452734b356a791d3e0ac091441ad360da20ebd83c12ae73bdd4c0458e38774"}
Dec 11 08:34:36 crc kubenswrapper[4881]: I1211 08:34:36.431005 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-qh8s8" event={"ID":"9b3ee431-6c33-4b49-8fdb-27056597fbe8","Type":"ContainerStarted","Data":"5a3e0427a8bd1d7f72fa64d27cb6234b5a059c78fe75774fa735fbc90404e353"}
Dec 11 08:34:36 crc kubenswrapper[4881]: E1211 08:34:36.432529 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:4f799c74da2f1c864af24fcd5efd91ec64848972a95246eac6b5c6c4d71c1756\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-qh8s8" podUID="9b3ee431-6c33-4b49-8fdb-27056597fbe8"
Dec 11 08:34:36 crc kubenswrapper[4881]: I1211 08:34:36.433199 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-bb86466d8-6mz9j" event={"ID":"06cd21f3-b69e-4238-9894-8c4f0f77ee53","Type":"ContainerStarted","Data":"eefcef3adbae4ea622e59d78d83295978a1ff7cb90bff7bd171224ca550b4704"}
Dec 11 08:34:36 crc kubenswrapper[4881]: E1211 08:34:36.434776 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:49180c7bd4f0071e43ae7044260a3a97c4aa34fcbcb2d0d4573df449765ed391\\\"\"" pod="openstack-operators/test-operator-controller-manager-bb86466d8-6mz9j" podUID="06cd21f3-b69e-4238-9894-8c4f0f77ee53"
Dec 11 08:34:36 crc kubenswrapper[4881]: I1211 08:34:36.436650 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-ftqqr" event={"ID":"02aa2201-6757-40f8-b24d-fbad39b79069","Type":"ContainerStarted","Data":"3e9a9874eb19c60385e129758a04d34a49f4d2c033307138695237e67759f463"}
Dec 11 08:34:36 crc kubenswrapper[4881]: E1211 08:34:36.437418 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-22zs6" podUID="14d65f13-7dce-49b7-9c8e-0a6ea9b57132"
Dec 11 08:34:36 crc kubenswrapper[4881]: E1211 08:34:36.440136 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d\\\"\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-ftqqr" podUID="02aa2201-6757-40f8-b24d-fbad39b79069"
Dec 11 08:34:36 crc kubenswrapper[4881]: E1211 08:34:36.457238 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-ww55g" podUID="da8fe0e3-3416-453d-80b7-47d4ab23c610"
Dec 11 08:34:36 crc kubenswrapper[4881]: E1211 08:34:36.477595 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-twbpm" podUID="d46284f5-997e-4ce9-a607-254c3ce33f31"
Dec 11 08:34:36 crc kubenswrapper[4881]: E1211 08:34:36.488644 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-mj8lx" podUID="dd07d5b4-cfe3-4580-a859-64558daab601"
Dec 11 08:34:36 crc kubenswrapper[4881]: E1211 08:34:36.552637 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" podUID="2621fa0b-89fb-4d65-aef3-98de0e9a8106"
Dec 11 08:34:36 crc kubenswrapper[4881]: E1211 08:34:36.555653 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-rs5fd" podUID="d4850972-2a52-4030-9822-af3de9cc647a"
Dec 11 08:34:36 crc kubenswrapper[4881]: E1211 08:34:36.619728 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qhz8n" podUID="21cbbd1f-7cfe-481a-b02a-f72c9d052519"
Dec 11 08:34:37 crc kubenswrapper[4881]: I1211 08:34:37.443694 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-lzm95" event={"ID":"80435f25-efd8-482d-9a9b-1c6caafd655e","Type":"ContainerStarted","Data":"7fafb5562ca0ce22bf58458dd409a6675553a5067bb11f16c7b52f92392c77f1"}
Dec 11 08:34:37 crc kubenswrapper[4881]: I1211 08:34:37.444828 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-44khn" event={"ID":"867bc48e-c043-4428-b201-0ce4dd830f3f","Type":"ContainerStarted","Data":"d2c867f303c6dc174b8f43df894efa2765bcc29e1cb92ee8d9fd6c5782e865a1"}
Dec 11 08:34:37 crc kubenswrapper[4881]: I1211 08:34:37.446073 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-twbpm" event={"ID":"d46284f5-997e-4ce9-a607-254c3ce33f31","Type":"ContainerStarted","Data":"3aec2137344df7da5e0d1a8bead1ba560cb44d49df5bb91a82e75e562e589b81"}
Dec 11 08:34:37 crc kubenswrapper[4881]: I1211 08:34:37.447429 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-rs5fd" event={"ID":"d4850972-2a52-4030-9822-af3de9cc647a","Type":"ContainerStarted","Data":"f8e80d6a146eecdfdf06b30eeaf89d0427f8fb719dff4a686df58ca1e6aa6912"}
Dec 11 08:34:37 crc kubenswrapper[4881]: I1211 08:34:37.448856 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-mj8lx" event={"ID":"dd07d5b4-cfe3-4580-a859-64558daab601","Type":"ContainerStarted","Data":"5acb7353ca8e258e3f062b2d6541f772f71a2631862451a750f5a1b8af9f4040"}
Dec 11 08:34:37 crc kubenswrapper[4881]: E1211 08:34:37.450520 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:1988aaf9cd245150cda123aaaa21718ccb552c47f1623b7d68804f13c47f2c6a\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-mj8lx" podUID="dd07d5b4-cfe3-4580-a859-64558daab601"
Dec 11 08:34:37 crc kubenswrapper[4881]: I1211 08:34:37.451327 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qhz8n" event={"ID":"21cbbd1f-7cfe-481a-b02a-f72c9d052519","Type":"ContainerStarted","Data":"8b8a977946363a2db5b1007c5768cd9e5baf824fe9dc0b569a335b8bb5434fd9"}
Dec 11 08:34:37 crc kubenswrapper[4881]: E1211 08:34:37.452277 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:2c4fe20e044dd8ea1f60f2f3f5e3844d932b4b79439835bd8771c73f16b38312\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qhz8n" podUID="21cbbd1f-7cfe-481a-b02a-f72c9d052519"
Dec 11 08:34:37 crc kubenswrapper[4881]: I1211 08:34:37.453206 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-58879495c-kmpzx" event={"ID":"5079b14d-bd2f-4151-898d-91362a4b24c2","Type":"ContainerStarted","Data":"46309208670b96e40186a419c36600fa4b57dcd9aaf8435e85889de0ccd80b75"}
Dec 11 08:34:37 crc kubenswrapper[4881]: I1211 08:34:37.455058 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-ww55g" event={"ID":"da8fe0e3-3416-453d-80b7-47d4ab23c610","Type":"ContainerStarted","Data":"783acaca87a11e6780240be0a0cc0ce508f3d11c13b38c00e31f2c258d588f88"}
Dec 11 08:34:37 crc kubenswrapper[4881]: I1211 08:34:37.456811 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" event={"ID":"2621fa0b-89fb-4d65-aef3-98de0e9a8106","Type":"ContainerStarted","Data":"111b700a7ec5a22e08ce0d6f2c036c8b211195540f56a0e284f6679fae457ce3"}
Dec 11 08:34:37 crc kubenswrapper[4881]: E1211 08:34:37.457967 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d\\\"\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-ftqqr" podUID="02aa2201-6757-40f8-b24d-fbad39b79069"
Dec 11 08:34:37 crc kubenswrapper[4881]: E1211 08:34:37.458549 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:49180c7bd4f0071e43ae7044260a3a97c4aa34fcbcb2d0d4573df449765ed391\\\"\"" pod="openstack-operators/test-operator-controller-manager-bb86466d8-6mz9j" podUID="06cd21f3-b69e-4238-9894-8c4f0f77ee53"
Dec 11 08:34:37 crc kubenswrapper[4881]: E1211 08:34:37.458605 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:66928f0eae5206f671ac7b21f79953e37009c54187d768dc6e03fe0a3d202b3b\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" podUID="2621fa0b-89fb-4d65-aef3-98de0e9a8106"
Dec 11 08:34:38 crc kubenswrapper[4881]: E1211 08:34:38.466955 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:66928f0eae5206f671ac7b21f79953e37009c54187d768dc6e03fe0a3d202b3b\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" podUID="2621fa0b-89fb-4d65-aef3-98de0e9a8106"
Dec 11 08:34:38 crc kubenswrapper[4881]: E1211 08:34:38.466957 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:1988aaf9cd245150cda123aaaa21718ccb552c47f1623b7d68804f13c47f2c6a\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-mj8lx" podUID="dd07d5b4-cfe3-4580-a859-64558daab601"
Dec 11 08:34:38 crc kubenswrapper[4881]: E1211 08:34:38.466976 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:2c4fe20e044dd8ea1f60f2f3f5e3844d932b4b79439835bd8771c73f16b38312\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qhz8n" podUID="21cbbd1f-7cfe-481a-b02a-f72c9d052519"
Dec 11 08:34:39 crc kubenswrapper[4881]: I1211 08:34:39.475033 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd" event={"ID":"05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1","Type":"ContainerStarted","Data":"affbbc0d36e3b0e9cc64e943fcd997ec7c69e071e35a27cf4864711297553a20"}
Dec 11 08:34:39 crc kubenswrapper[4881]: E1211 08:34:39.986428 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd" podUID="05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1"
Dec 11 08:34:40 crc kubenswrapper[4881]: E1211 08:34:40.043864 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zkl8b" podUID="246f8ac7-b65e-40b1-aba1-ba1defde43ef"
Dec 11 08:34:40 crc kubenswrapper[4881]: E1211 08:34:40.044450 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-dqhqx" podUID="2dab1d4f-2c9a-4b32-a666-4b0802e51576"
Dec 11 08:34:40 crc kubenswrapper[4881]: E1211 08:34:40.050601 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-fb56f4744-vgmrx" podUID="de3e8077-0bfa-4e55-aba0-0e5dca0e598d"
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.504108 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-6gtkq" event={"ID":"e07ec193-8583-4299-9370-ce788e2e1ae1","Type":"ContainerStarted","Data":"fe30c9fb6e90c3be93db7e58b18be4f4133bb9c7b2479ac2dfa0ad6c0d7c10e0"}
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.504382 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-6gtkq"
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.524957 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-6gtkq" podStartSLOduration=16.956347797 podStartE2EDuration="41.524926705s" podCreationTimestamp="2025-12-11 08:33:59 +0000 UTC" firstStartedPulling="2025-12-11 08:34:01.541376371 +0000 UTC m=+1089.918745068" lastFinishedPulling="2025-12-11 08:34:26.109955279 +0000 UTC m=+1114.487323976" observedRunningTime="2025-12-11 08:34:40.522312129 +0000 UTC m=+1128.899680826" watchObservedRunningTime="2025-12-11 08:34:40.524926705 +0000 UTC m=+1128.902295402"
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.553218 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-mvlsx" event={"ID":"2fd323b1-8fa8-456c-bcd8-d89872682762","Type":"ContainerStarted","Data":"3567701f4284d2b1dc93a583926f0b0ce2f49460961e34b56f4bc30bbc82390c"}
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.553263 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-mvlsx" event={"ID":"2fd323b1-8fa8-456c-bcd8-d89872682762","Type":"ContainerStarted","Data":"7748b9d229bcc87c7427286b8cc7b7394dcdedc794867fea9ba7a6ebdef86fc8"}
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.554228 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-54485f899-mvlsx"
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.573994 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-54485f899-mvlsx" podStartSLOduration=8.319516319 podStartE2EDuration="41.573976113s" podCreationTimestamp="2025-12-11 08:33:59 +0000 UTC" firstStartedPulling="2025-12-11 08:34:02.263224544 +0000 UTC m=+1090.640593241" lastFinishedPulling="2025-12-11 08:34:35.517684338 +0000 UTC m=+1123.895053035" observedRunningTime="2025-12-11 08:34:40.569212514 +0000 UTC m=+1128.946581231" watchObservedRunningTime="2025-12-11 08:34:40.573976113 +0000 UTC m=+1128.951344810"
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.575092 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-dqhqx" event={"ID":"2dab1d4f-2c9a-4b32-a666-4b0802e51576","Type":"ContainerStarted","Data":"c5969c3ba8b43155fa214ad3e748a268c8e05bae930681eb38197290f5eddc6c"}
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.595763 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-q22gz" event={"ID":"ac8a74d8-c81e-4154-b2dc-7ebb23d13aa7","Type":"ContainerStarted","Data":"83b6790d91e9e27769fc2366b698e1a2a92975ce8918e0be7c6c94310c7b6a33"}
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.622608 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr" event={"ID":"128ea8d0-53b4-410c-8587-165aa960d46c","Type":"ContainerStarted","Data":"f46175647fd277514cb4e8bfef1e685124fe9ded65aa9e20924cf200d11740fd"}
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.635435 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-lzm95" event={"ID":"80435f25-efd8-482d-9a9b-1c6caafd655e","Type":"ContainerStarted","Data":"00e981e55a9e21129a5b55cb4285b2ea48ab4ab007153541aa64b4f114100f1d"}
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.636506 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-748967c98-lzm95"
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.649449 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-58879495c-kmpzx" event={"ID":"5079b14d-bd2f-4151-898d-91362a4b24c2","Type":"ContainerStarted","Data":"5e3b7702e2b43335d55020c6d5fb610b5d7282d29bb0c49ce31dc6700c0937df"}
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.650032 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-58879495c-kmpzx"
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.661680 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zkl8b" event={"ID":"246f8ac7-b65e-40b1-aba1-ba1defde43ef","Type":"ContainerStarted","Data":"473b491badc81dc39ebb878ab51573d07996075276ba1c76dc0e3a4c3cd6a6ad"}
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.666901 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-nnlz4" event={"ID":"91ca18da-4852-496b-bf77-558e8010aabe","Type":"ContainerStarted","Data":"64060a764f32db516dc7cd7888f42243c58ee09330d15eb18dafddf73595d7ac"}
Dec 11 08:34:40 crc kubenswrapper[4881]: E1211 08:34:40.667306 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:f076b8d9e85881d9c3cb5272b13db7f5e05d2e9da884c17b677a844112831907\\\"\"" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zkl8b" podUID="246f8ac7-b65e-40b1-aba1-ba1defde43ef"
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.679284 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-d4dsr" event={"ID":"e5f3ecaa-a91f-4dc8-9baf-2866cf8df0f4","Type":"ContainerStarted","Data":"f0994ec0fe105cb593d71e65d399105952f500d4e9a3a28b00a0f3d71b6a3025"}
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.695489 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-748967c98-lzm95" podStartSLOduration=13.148837512 podStartE2EDuration="41.695465516s" podCreationTimestamp="2025-12-11 08:33:59 +0000 UTC" firstStartedPulling="2025-12-11 08:34:00.81154874 +0000 UTC m=+1089.188917437" lastFinishedPulling="2025-12-11 08:34:29.358176744 +0000 UTC m=+1117.735545441" observedRunningTime="2025-12-11 08:34:40.689046815 +0000 UTC m=+1129.066415512" watchObservedRunningTime="2025-12-11 08:34:40.695465516 +0000 UTC m=+1129.072834203"
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.739747 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-fb56f4744-vgmrx" event={"ID":"de3e8077-0bfa-4e55-aba0-0e5dca0e598d","Type":"ContainerStarted","Data":"e1bb43a7e1761242e287753e7b2d50b8a1f8afd61e6b47576c69eefcaecccd12"}
Dec 11 08:34:40 crc kubenswrapper[4881]: E1211 08:34:40.751306 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.9:5001/openstack-k8s-operators/telemetry-operator:30f617403195366c610a5c61c5e3f09a4bb73c0f\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-fb56f4744-vgmrx" podUID="de3e8077-0bfa-4e55-aba0-0e5dca0e598d"
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.779996 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-44khn" event={"ID":"867bc48e-c043-4428-b201-0ce4dd830f3f","Type":"ContainerStarted","Data":"ae039dabdbe33d643e5256edb43bf91494f6e5bd1c6fa8da7970cb54abe504e6"}
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.780158 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-44khn"
Dec 11 08:34:40 crc kubenswrapper[4881]: E1211 08:34:40.785622 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:debe5d6d29a007374b270b0e114e69b2136eee61dabab8576baf4010c951edb9\\\"\"" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd" podUID="05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1"
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.796106 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-58879495c-kmpzx" podStartSLOduration=8.462135347 podStartE2EDuration="41.796071436s" podCreationTimestamp="2025-12-11 08:33:59 +0000 UTC" firstStartedPulling="2025-12-11 08:34:02.246964642 +0000 UTC m=+1090.624333339" lastFinishedPulling="2025-12-11 08:34:35.580900731 +0000 UTC m=+1123.958269428" observedRunningTime="2025-12-11 08:34:40.739621131 +0000 UTC m=+1129.116989828" watchObservedRunningTime="2025-12-11 08:34:40.796071436 +0000 UTC m=+1129.173440133"
Dec 11 08:34:40 crc kubenswrapper[4881]: I1211 08:34:40.864623 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-44khn" podStartSLOduration=8.291321878 podStartE2EDuration="41.864601471s" podCreationTimestamp="2025-12-11 08:33:59 +0000 UTC" firstStartedPulling="2025-12-11 08:34:02.007766982 +0000 UTC m=+1090.385135679" lastFinishedPulling="2025-12-11 08:34:35.581046575 +0000 UTC m=+1123.958415272" observedRunningTime="2025-12-11 08:34:40.847443751 +0000 UTC m=+1129.224812448" watchObservedRunningTime="2025-12-11 08:34:40.864601471 +0000 UTC m=+1129.241970168"
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.788290 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-qh8s8" event={"ID":"9b3ee431-6c33-4b49-8fdb-27056597fbe8","Type":"ContainerStarted","Data":"2fc50e392a46c549746fa7a38df79ee11c4a1f34e304ece350e101f9f600837e"}
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.789467 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-qh8s8"
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.790163 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr" event={"ID":"128ea8d0-53b4-410c-8587-165aa960d46c","Type":"ContainerStarted","Data":"410d92b050364f2c1769dad171ec5cb513d26ee9d49442aaefe1c387c1afb0aa"}
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.790296 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr"
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.791944 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-dqhqx" event={"ID":"2dab1d4f-2c9a-4b32-a666-4b0802e51576","Type":"ContainerStarted","Data":"5b0d9976099625b77734ad0085c4dc7136f8b58abb7fa1af4da9aae819a31c43"}
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.792079 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-dqhqx"
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.793541 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-rs5fd" event={"ID":"d4850972-2a52-4030-9822-af3de9cc647a","Type":"ContainerStarted","Data":"f1207eb47e7a6228dd4e14652f4b529812f266fef607577b258bfa51738e3d79"}
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.793668 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-rs5fd"
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.795028 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-nnlz4" event={"ID":"91ca18da-4852-496b-bf77-558e8010aabe","Type":"ContainerStarted","Data":"455da811426e24c98d355eebc02d9b7fbf62c099903d1099f9b1b5acb5a4ccf0"}
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.795206 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-nnlz4"
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.796536 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-twbpm" event={"ID":"d46284f5-997e-4ce9-a607-254c3ce33f31","Type":"ContainerStarted","Data":"0592eb4ea40c06f1221593dd58726e02f5c4ceb9f7d814543772e05d4c63c293"}
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.796646 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-twbpm"
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.798069 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-q22gz" event={"ID":"ac8a74d8-c81e-4154-b2dc-7ebb23d13aa7","Type":"ContainerStarted","Data":"e131e835139838b5de42bce7be5f81b1b682c3dc3e5fadc67658cb7d26d32fa3"}
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.798180 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-q22gz"
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.801250 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-d4dsr" event={"ID":"e5f3ecaa-a91f-4dc8-9baf-2866cf8df0f4","Type":"ContainerStarted","Data":"d70a322b961f16cc9fcd28e9c23e1ef873d0098c55197fd1ce5cb79537a896d4"}
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.801368 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-d4dsr"
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.802891 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-ww55g" event={"ID":"da8fe0e3-3416-453d-80b7-47d4ab23c610","Type":"ContainerStarted","Data":"9eb8525dd70e8c2c0ebe5c563e1c91b8c203eb00cc5b16e3b0a6f987bb2e701d"}
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.805692 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-748967c98-lzm95"
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.805993 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-6gtkq"
Dec 11 08:34:41 crc kubenswrapper[4881]: E1211 08:34:41.806529 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.9:5001/openstack-k8s-operators/telemetry-operator:30f617403195366c610a5c61c5e3f09a4bb73c0f\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-fb56f4744-vgmrx" podUID="de3e8077-0bfa-4e55-aba0-0e5dca0e598d"
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.808254 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-44khn"
Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.809406 4881 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-58879495c-kmpzx" Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.848051 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-qh8s8" podStartSLOduration=4.49597759 podStartE2EDuration="42.848026029s" podCreationTimestamp="2025-12-11 08:33:59 +0000 UTC" firstStartedPulling="2025-12-11 08:34:01.945071701 +0000 UTC m=+1090.322440398" lastFinishedPulling="2025-12-11 08:34:40.29712014 +0000 UTC m=+1128.674488837" observedRunningTime="2025-12-11 08:34:41.824636463 +0000 UTC m=+1130.202005160" watchObservedRunningTime="2025-12-11 08:34:41.848026029 +0000 UTC m=+1130.225394726" Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.862553 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-dqhqx" podStartSLOduration=3.5540407050000002 podStartE2EDuration="42.862537942s" podCreationTimestamp="2025-12-11 08:33:59 +0000 UTC" firstStartedPulling="2025-12-11 08:34:01.997855207 +0000 UTC m=+1090.375223904" lastFinishedPulling="2025-12-11 08:34:41.306352444 +0000 UTC m=+1129.683721141" observedRunningTime="2025-12-11 08:34:41.85762616 +0000 UTC m=+1130.234994857" watchObservedRunningTime="2025-12-11 08:34:41.862537942 +0000 UTC m=+1130.239906639" Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.924459 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-ww55g" podStartSLOduration=4.424348503 podStartE2EDuration="42.924440682s" podCreationTimestamp="2025-12-11 08:33:59 +0000 UTC" firstStartedPulling="2025-12-11 08:34:01.988629999 +0000 UTC m=+1090.365998696" lastFinishedPulling="2025-12-11 08:34:40.488722178 +0000 UTC m=+1128.866090875" observedRunningTime="2025-12-11 08:34:41.917289104 +0000 UTC m=+1130.294657801" watchObservedRunningTime="2025-12-11 08:34:41.924440682 +0000 UTC m=+1130.301809379" Dec 11 08:34:41 crc kubenswrapper[4881]: I1211 08:34:41.957551 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-d4dsr" podStartSLOduration=20.830159975 podStartE2EDuration="42.957531551s" podCreationTimestamp="2025-12-11 08:33:59 +0000 UTC" firstStartedPulling="2025-12-11 08:34:01.114099297 +0000 UTC m=+1089.491467994" lastFinishedPulling="2025-12-11 08:34:23.241470873 +0000 UTC m=+1111.618839570" observedRunningTime="2025-12-11 08:34:41.946492035 +0000 UTC m=+1130.323860732" watchObservedRunningTime="2025-12-11 08:34:41.957531551 +0000 UTC m=+1130.334900248" Dec 11 08:34:42 crc kubenswrapper[4881]: I1211 08:34:42.000802 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr" podStartSLOduration=42.000775385 podStartE2EDuration="42.000775385s" podCreationTimestamp="2025-12-11 08:34:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:34:41.970729743 +0000 UTC m=+1130.348098430" watchObservedRunningTime="2025-12-11 08:34:42.000775385 +0000 UTC m=+1130.378144082" Dec 11 08:34:42 crc kubenswrapper[4881]: I1211 08:34:42.058761 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-twbpm" podStartSLOduration=4.110127856 podStartE2EDuration="43.058728966s" podCreationTimestamp="2025-12-11 08:33:59 +0000 UTC" firstStartedPulling="2025-12-11 08:34:01.210310458 +0000 UTC m=+1089.587679155" lastFinishedPulling="2025-12-11 08:34:40.158911568 +0000 UTC m=+1128.536280265" observedRunningTime="2025-12-11 08:34:42.035244148 +0000 UTC m=+1130.412612845" watchObservedRunningTime="2025-12-11 08:34:42.058728966 +0000 UTC m=+1130.436097713" Dec 11 08:34:42 crc kubenswrapper[4881]: I1211 08:34:42.064737 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-rs5fd" podStartSLOduration=4.339977526 podStartE2EDuration="43.064726316s" podCreationTimestamp="2025-12-11 08:33:59 +0000 UTC" firstStartedPulling="2025-12-11 08:34:01.498817148 +0000 UTC m=+1089.876185845" lastFinishedPulling="2025-12-11 08:34:40.223565938 +0000 UTC m=+1128.600934635" observedRunningTime="2025-12-11 08:34:42.050182692 +0000 UTC m=+1130.427551389" watchObservedRunningTime="2025-12-11 08:34:42.064726316 +0000 UTC m=+1130.442095013" Dec 11 08:34:42 crc kubenswrapper[4881]: I1211 08:34:42.102324 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-nnlz4" podStartSLOduration=9.817218236 podStartE2EDuration="43.102301647s" podCreationTimestamp="2025-12-11 08:33:59 +0000 UTC" firstStartedPulling="2025-12-11 08:34:02.231459408 +0000 UTC m=+1090.608828105" lastFinishedPulling="2025-12-11 08:34:35.516542819 +0000 UTC m=+1123.893911516" observedRunningTime="2025-12-11 08:34:42.08841114 +0000 UTC m=+1130.465779847" watchObservedRunningTime="2025-12-11 08:34:42.102301647 +0000 UTC m=+1130.479670344" Dec 11 08:34:42 crc kubenswrapper[4881]: I1211 08:34:42.114891 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-q22gz" podStartSLOduration=9.797610585 podStartE2EDuration="43.114856731s" podCreationTimestamp="2025-12-11 08:33:59 +0000 UTC" firstStartedPulling="2025-12-11 08:34:02.26386987 +0000 UTC m=+1090.641238557" lastFinishedPulling="2025-12-11 08:34:35.581115996 +0000 UTC m=+1123.958484703" observedRunningTime="2025-12-11 08:34:42.108701337 +0000 UTC m=+1130.486070024" watchObservedRunningTime="2025-12-11 08:34:42.114856731 +0000 UTC m=+1130.492225428" Dec 11 08:34:42 crc kubenswrapper[4881]: I1211 08:34:42.812891 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zkl8b" event={"ID":"246f8ac7-b65e-40b1-aba1-ba1defde43ef","Type":"ContainerStarted","Data":"8878fd63dc1c09e34c320433aa5f76d0fb8cbb2e54a7b029bd2e635a27585cd2"} Dec 11 08:34:42 crc kubenswrapper[4881]: I1211 08:34:42.813723 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-ww55g" Dec 11 08:34:42 crc kubenswrapper[4881]: I1211 08:34:42.837218 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zkl8b" podStartSLOduration=6.458824062 podStartE2EDuration="42.837195221s" podCreationTimestamp="2025-12-11 08:34:00 +0000 UTC" firstStartedPulling="2025-12-11 08:34:06.083832382 +0000 UTC m=+1094.461201089" lastFinishedPulling="2025-12-11 08:34:42.462203531 +0000 UTC m=+1130.839572248" 
observedRunningTime="2025-12-11 08:34:42.828601166 +0000 UTC m=+1131.205969863" watchObservedRunningTime="2025-12-11 08:34:42.837195221 +0000 UTC m=+1131.214563918" Dec 11 08:34:47 crc kubenswrapper[4881]: I1211 08:34:47.855133 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-22zs6" event={"ID":"14d65f13-7dce-49b7-9c8e-0a6ea9b57132","Type":"ContainerStarted","Data":"7dfde03f43a64f8684967166eb21a676054c97e9ab17202ec26eff2f87d72e8d"} Dec 11 08:34:47 crc kubenswrapper[4881]: I1211 08:34:47.868823 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-22zs6" podStartSLOduration=7.211743286 podStartE2EDuration="47.868805789s" podCreationTimestamp="2025-12-11 08:34:00 +0000 UTC" firstStartedPulling="2025-12-11 08:34:06.804963568 +0000 UTC m=+1095.182332255" lastFinishedPulling="2025-12-11 08:34:47.462026021 +0000 UTC m=+1135.839394758" observedRunningTime="2025-12-11 08:34:47.867177518 +0000 UTC m=+1136.244546225" watchObservedRunningTime="2025-12-11 08:34:47.868805789 +0000 UTC m=+1136.246174486" Dec 11 08:34:48 crc kubenswrapper[4881]: I1211 08:34:48.866865 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-ftqqr" event={"ID":"02aa2201-6757-40f8-b24d-fbad39b79069","Type":"ContainerStarted","Data":"7f20dc0eedfa298995453c848058b150de0f41656c80b94870c7e40b6fb6564f"} Dec 11 08:34:48 crc kubenswrapper[4881]: I1211 08:34:48.867470 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-867d87977b-ftqqr" Dec 11 08:34:48 crc kubenswrapper[4881]: I1211 08:34:48.892708 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-867d87977b-ftqqr" podStartSLOduration=2.682750836 podStartE2EDuration="48.892685779s" podCreationTimestamp="2025-12-11 08:34:00 +0000 UTC" firstStartedPulling="2025-12-11 08:34:02.240075601 +0000 UTC m=+1090.617444308" lastFinishedPulling="2025-12-11 08:34:48.450010554 +0000 UTC m=+1136.827379251" observedRunningTime="2025-12-11 08:34:48.886270639 +0000 UTC m=+1137.263639336" watchObservedRunningTime="2025-12-11 08:34:48.892685779 +0000 UTC m=+1137.270054466" Dec 11 08:34:49 crc kubenswrapper[4881]: I1211 08:34:49.878580 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-bb86466d8-6mz9j" event={"ID":"06cd21f3-b69e-4238-9894-8c4f0f77ee53","Type":"ContainerStarted","Data":"a75dade4d1bc856df30a5aff9586d9db1dd525d621b7090956a846b4ddecd2e4"} Dec 11 08:34:49 crc kubenswrapper[4881]: I1211 08:34:49.879156 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-bb86466d8-6mz9j" Dec 11 08:34:49 crc kubenswrapper[4881]: I1211 08:34:49.895843 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-bb86466d8-6mz9j" podStartSLOduration=6.474496276 podStartE2EDuration="49.895821682s" podCreationTimestamp="2025-12-11 08:34:00 +0000 UTC" firstStartedPulling="2025-12-11 08:34:06.085197476 +0000 UTC m=+1094.462566183" lastFinishedPulling="2025-12-11 08:34:49.506522892 +0000 UTC m=+1137.883891589" observedRunningTime="2025-12-11 08:34:49.893989505 +0000 UTC m=+1138.271358232" 
watchObservedRunningTime="2025-12-11 08:34:49.895821682 +0000 UTC m=+1138.273190389" Dec 11 08:34:50 crc kubenswrapper[4881]: I1211 08:34:50.038403 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-d4dsr" Dec 11 08:34:50 crc kubenswrapper[4881]: I1211 08:34:50.114857 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-twbpm" Dec 11 08:34:50 crc kubenswrapper[4881]: I1211 08:34:50.333150 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-rs5fd" Dec 11 08:34:50 crc kubenswrapper[4881]: I1211 08:34:50.427637 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-dqhqx" Dec 11 08:34:50 crc kubenswrapper[4881]: I1211 08:34:50.469091 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-54485f899-mvlsx" Dec 11 08:34:50 crc kubenswrapper[4881]: I1211 08:34:50.501641 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-qh8s8" Dec 11 08:34:50 crc kubenswrapper[4881]: I1211 08:34:50.503472 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-ww55g" Dec 11 08:34:50 crc kubenswrapper[4881]: I1211 08:34:50.633581 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-q22gz" Dec 11 08:34:50 crc kubenswrapper[4881]: I1211 08:34:50.705099 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-nnlz4" Dec 11 08:34:50 crc kubenswrapper[4881]: I1211 08:34:50.751177 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zkl8b" Dec 11 08:34:50 crc kubenswrapper[4881]: I1211 08:34:50.753098 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zkl8b" Dec 11 08:34:51 crc kubenswrapper[4881]: I1211 08:34:51.758109 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-b95f4d4f8-phlkr" Dec 11 08:34:52 crc kubenswrapper[4881]: I1211 08:34:52.929525 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" event={"ID":"2621fa0b-89fb-4d65-aef3-98de0e9a8106","Type":"ContainerStarted","Data":"3729f76c755326c515c60d2eff248e680e5fdb7eb0f58d3d96c75e45d4da17e9"} Dec 11 08:34:52 crc kubenswrapper[4881]: I1211 08:34:52.930479 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" Dec 11 08:34:52 crc kubenswrapper[4881]: I1211 08:34:52.961132 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" podStartSLOduration=8.287101592 podStartE2EDuration="53.961105646s" 
podCreationTimestamp="2025-12-11 08:33:59 +0000 UTC" firstStartedPulling="2025-12-11 08:34:06.802495886 +0000 UTC m=+1095.179864583" lastFinishedPulling="2025-12-11 08:34:52.47649993 +0000 UTC m=+1140.853868637" observedRunningTime="2025-12-11 08:34:52.956600593 +0000 UTC m=+1141.333969300" watchObservedRunningTime="2025-12-11 08:34:52.961105646 +0000 UTC m=+1141.338474343" Dec 11 08:34:53 crc kubenswrapper[4881]: I1211 08:34:53.953840 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qhz8n" event={"ID":"21cbbd1f-7cfe-481a-b02a-f72c9d052519","Type":"ContainerStarted","Data":"3bfa57f8c1453f2eecd1fb3a53d0f71044be44e436271b09360df7ae6aeeed96"} Dec 11 08:34:53 crc kubenswrapper[4881]: I1211 08:34:53.955898 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qhz8n" Dec 11 08:34:53 crc kubenswrapper[4881]: I1211 08:34:53.961014 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-fb56f4744-vgmrx" event={"ID":"de3e8077-0bfa-4e55-aba0-0e5dca0e598d","Type":"ContainerStarted","Data":"e70f69b4aad7e0ce086c73137db3621cc1af2666398ac5476a5ce96049b12c23"} Dec 11 08:34:53 crc kubenswrapper[4881]: I1211 08:34:53.961396 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-fb56f4744-vgmrx" Dec 11 08:34:53 crc kubenswrapper[4881]: I1211 08:34:53.986405 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qhz8n" podStartSLOduration=8.934125773 podStartE2EDuration="54.986383552s" podCreationTimestamp="2025-12-11 08:33:59 +0000 UTC" firstStartedPulling="2025-12-11 08:34:06.810977996 +0000 UTC m=+1095.188346693" lastFinishedPulling="2025-12-11 08:34:52.863235765 +0000 UTC m=+1141.240604472" observedRunningTime="2025-12-11 08:34:53.975405407 +0000 UTC m=+1142.352774114" watchObservedRunningTime="2025-12-11 08:34:53.986383552 +0000 UTC m=+1142.363752249" Dec 11 08:34:54 crc kubenswrapper[4881]: I1211 08:34:54.006581 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-fb56f4744-vgmrx" podStartSLOduration=7.008148054 podStartE2EDuration="54.006555297s" podCreationTimestamp="2025-12-11 08:34:00 +0000 UTC" firstStartedPulling="2025-12-11 08:34:06.068855711 +0000 UTC m=+1094.446224428" lastFinishedPulling="2025-12-11 08:34:53.067262964 +0000 UTC m=+1141.444631671" observedRunningTime="2025-12-11 08:34:53.99310407 +0000 UTC m=+1142.370472767" watchObservedRunningTime="2025-12-11 08:34:54.006555297 +0000 UTC m=+1142.383923994" Dec 11 08:34:54 crc kubenswrapper[4881]: I1211 08:34:54.968819 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-mj8lx" event={"ID":"dd07d5b4-cfe3-4580-a859-64558daab601","Type":"ContainerStarted","Data":"4deb31c69eef09785e0f90e238a7adc1a51f7ed538802e64f3b34ae4948cb5bf"} Dec 11 08:34:54 crc kubenswrapper[4881]: I1211 08:34:54.969256 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-mj8lx" Dec 11 08:34:54 crc kubenswrapper[4881]: I1211 08:34:54.971618 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd" event={"ID":"05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1","Type":"ContainerStarted","Data":"6d61b35f02e794d10ce18d58528c618df33640568fbc62dfce8e981f8da0f52c"} Dec 11 08:34:54 crc kubenswrapper[4881]: I1211 08:34:54.971851 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd" Dec 11 08:34:54 crc kubenswrapper[4881]: I1211 08:34:54.992897 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-mj8lx" podStartSLOduration=8.000953444 podStartE2EDuration="54.992880578s" podCreationTimestamp="2025-12-11 08:34:00 +0000 UTC" firstStartedPulling="2025-12-11 08:34:06.790807858 +0000 UTC m=+1095.168176555" lastFinishedPulling="2025-12-11 08:34:53.782734992 +0000 UTC m=+1142.160103689" observedRunningTime="2025-12-11 08:34:54.986850517 +0000 UTC m=+1143.364219214" watchObservedRunningTime="2025-12-11 08:34:54.992880578 +0000 UTC m=+1143.370249275" Dec 11 08:34:55 crc kubenswrapper[4881]: I1211 08:34:55.005066 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd" podStartSLOduration=9.065293235 podStartE2EDuration="56.005045833s" podCreationTimestamp="2025-12-11 08:33:59 +0000 UTC" firstStartedPulling="2025-12-11 08:34:06.792394207 +0000 UTC m=+1095.169762904" lastFinishedPulling="2025-12-11 08:34:53.732146805 +0000 UTC m=+1142.109515502" observedRunningTime="2025-12-11 08:34:55.003191915 +0000 UTC m=+1143.380560622" watchObservedRunningTime="2025-12-11 08:34:55.005045833 +0000 UTC m=+1143.382414530" Dec 11 08:35:00 crc kubenswrapper[4881]: I1211 08:35:00.736356 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-867d87977b-ftqqr" Dec 11 08:35:00 crc kubenswrapper[4881]: I1211 08:35:00.814067 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qhz8n" Dec 11 08:35:00 crc kubenswrapper[4881]: I1211 08:35:00.866457 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd" Dec 11 08:35:00 crc kubenswrapper[4881]: I1211 08:35:00.884225 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-fb56f4744-vgmrx" Dec 11 08:35:01 crc kubenswrapper[4881]: I1211 08:35:01.034464 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-bb86466d8-6mz9j" Dec 11 08:35:01 crc kubenswrapper[4881]: I1211 08:35:01.137906 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-mj8lx" Dec 11 08:35:01 crc kubenswrapper[4881]: I1211 08:35:01.269316 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-kgwhp" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.135223 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-wkdgt"] Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.145561 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/dnsmasq-dns-675f4bcbfc-wkdgt"] Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.145706 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-wkdgt" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.149405 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-dphwr" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.149614 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.149785 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.155793 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.209118 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-qwfp5"] Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.213264 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-qwfp5" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.217358 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.224814 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-qwfp5"] Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.311652 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d33da677-6c43-4e7e-87de-648d14ae9a8d-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-qwfp5\" (UID: \"d33da677-6c43-4e7e-87de-648d14ae9a8d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-qwfp5" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.311712 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqcl7\" (UniqueName: \"kubernetes.io/projected/1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a-kube-api-access-mqcl7\") pod \"dnsmasq-dns-675f4bcbfc-wkdgt\" (UID: \"1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-wkdgt" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.311743 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d33da677-6c43-4e7e-87de-648d14ae9a8d-config\") pod \"dnsmasq-dns-78dd6ddcc-qwfp5\" (UID: \"d33da677-6c43-4e7e-87de-648d14ae9a8d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-qwfp5" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.311783 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a-config\") pod \"dnsmasq-dns-675f4bcbfc-wkdgt\" (UID: \"1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-wkdgt" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.311809 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbd9w\" (UniqueName: \"kubernetes.io/projected/d33da677-6c43-4e7e-87de-648d14ae9a8d-kube-api-access-dbd9w\") pod \"dnsmasq-dns-78dd6ddcc-qwfp5\" (UID: \"d33da677-6c43-4e7e-87de-648d14ae9a8d\") " 
pod="openstack/dnsmasq-dns-78dd6ddcc-qwfp5" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.413206 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d33da677-6c43-4e7e-87de-648d14ae9a8d-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-qwfp5\" (UID: \"d33da677-6c43-4e7e-87de-648d14ae9a8d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-qwfp5" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.413265 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqcl7\" (UniqueName: \"kubernetes.io/projected/1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a-kube-api-access-mqcl7\") pod \"dnsmasq-dns-675f4bcbfc-wkdgt\" (UID: \"1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-wkdgt" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.413291 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d33da677-6c43-4e7e-87de-648d14ae9a8d-config\") pod \"dnsmasq-dns-78dd6ddcc-qwfp5\" (UID: \"d33da677-6c43-4e7e-87de-648d14ae9a8d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-qwfp5" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.413351 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a-config\") pod \"dnsmasq-dns-675f4bcbfc-wkdgt\" (UID: \"1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-wkdgt" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.413380 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbd9w\" (UniqueName: \"kubernetes.io/projected/d33da677-6c43-4e7e-87de-648d14ae9a8d-kube-api-access-dbd9w\") pod \"dnsmasq-dns-78dd6ddcc-qwfp5\" (UID: \"d33da677-6c43-4e7e-87de-648d14ae9a8d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-qwfp5" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.414467 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d33da677-6c43-4e7e-87de-648d14ae9a8d-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-qwfp5\" (UID: \"d33da677-6c43-4e7e-87de-648d14ae9a8d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-qwfp5" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.415136 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d33da677-6c43-4e7e-87de-648d14ae9a8d-config\") pod \"dnsmasq-dns-78dd6ddcc-qwfp5\" (UID: \"d33da677-6c43-4e7e-87de-648d14ae9a8d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-qwfp5" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.415737 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a-config\") pod \"dnsmasq-dns-675f4bcbfc-wkdgt\" (UID: \"1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-wkdgt" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.433276 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbd9w\" (UniqueName: \"kubernetes.io/projected/d33da677-6c43-4e7e-87de-648d14ae9a8d-kube-api-access-dbd9w\") pod \"dnsmasq-dns-78dd6ddcc-qwfp5\" (UID: \"d33da677-6c43-4e7e-87de-648d14ae9a8d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-qwfp5" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.449377 4881 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-mqcl7\" (UniqueName: \"kubernetes.io/projected/1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a-kube-api-access-mqcl7\") pod \"dnsmasq-dns-675f4bcbfc-wkdgt\" (UID: \"1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-wkdgt" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.472806 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-wkdgt" Dec 11 08:35:18 crc kubenswrapper[4881]: I1211 08:35:18.540391 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-qwfp5" Dec 11 08:35:19 crc kubenswrapper[4881]: I1211 08:35:19.049321 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-wkdgt"] Dec 11 08:35:19 crc kubenswrapper[4881]: I1211 08:35:19.120121 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-qwfp5"] Dec 11 08:35:19 crc kubenswrapper[4881]: W1211 08:35:19.121906 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd33da677_6c43_4e7e_87de_648d14ae9a8d.slice/crio-daebff3496f4adc4f453839efdd40c33e1b72a2127d7e5a75a9322c3911389cd WatchSource:0}: Error finding container daebff3496f4adc4f453839efdd40c33e1b72a2127d7e5a75a9322c3911389cd: Status 404 returned error can't find the container with id daebff3496f4adc4f453839efdd40c33e1b72a2127d7e5a75a9322c3911389cd Dec 11 08:35:19 crc kubenswrapper[4881]: I1211 08:35:19.196434 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-qwfp5" event={"ID":"d33da677-6c43-4e7e-87de-648d14ae9a8d","Type":"ContainerStarted","Data":"daebff3496f4adc4f453839efdd40c33e1b72a2127d7e5a75a9322c3911389cd"} Dec 11 08:35:19 crc kubenswrapper[4881]: I1211 08:35:19.197209 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-wkdgt" event={"ID":"1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a","Type":"ContainerStarted","Data":"0e84f9ae82630896561c505b76187cfb529226b2b06ef517f6ec8173630b0548"} Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.092970 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-wkdgt"] Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.126678 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5485d"] Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.128774 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-5485d" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.142788 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5485d"] Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.271444 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5821aceb-9d65-4e6f-940d-2a20117397f6-config\") pod \"dnsmasq-dns-666b6646f7-5485d\" (UID: \"5821aceb-9d65-4e6f-940d-2a20117397f6\") " pod="openstack/dnsmasq-dns-666b6646f7-5485d" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.271506 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5821aceb-9d65-4e6f-940d-2a20117397f6-dns-svc\") pod \"dnsmasq-dns-666b6646f7-5485d\" (UID: \"5821aceb-9d65-4e6f-940d-2a20117397f6\") " pod="openstack/dnsmasq-dns-666b6646f7-5485d" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.271688 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8sl4\" (UniqueName: \"kubernetes.io/projected/5821aceb-9d65-4e6f-940d-2a20117397f6-kube-api-access-g8sl4\") pod \"dnsmasq-dns-666b6646f7-5485d\" (UID: \"5821aceb-9d65-4e6f-940d-2a20117397f6\") " pod="openstack/dnsmasq-dns-666b6646f7-5485d" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.387044 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5821aceb-9d65-4e6f-940d-2a20117397f6-config\") pod \"dnsmasq-dns-666b6646f7-5485d\" (UID: \"5821aceb-9d65-4e6f-940d-2a20117397f6\") " pod="openstack/dnsmasq-dns-666b6646f7-5485d" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.387119 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5821aceb-9d65-4e6f-940d-2a20117397f6-dns-svc\") pod \"dnsmasq-dns-666b6646f7-5485d\" (UID: \"5821aceb-9d65-4e6f-940d-2a20117397f6\") " pod="openstack/dnsmasq-dns-666b6646f7-5485d" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.387423 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8sl4\" (UniqueName: \"kubernetes.io/projected/5821aceb-9d65-4e6f-940d-2a20117397f6-kube-api-access-g8sl4\") pod \"dnsmasq-dns-666b6646f7-5485d\" (UID: \"5821aceb-9d65-4e6f-940d-2a20117397f6\") " pod="openstack/dnsmasq-dns-666b6646f7-5485d" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.388976 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5821aceb-9d65-4e6f-940d-2a20117397f6-config\") pod \"dnsmasq-dns-666b6646f7-5485d\" (UID: \"5821aceb-9d65-4e6f-940d-2a20117397f6\") " pod="openstack/dnsmasq-dns-666b6646f7-5485d" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.389349 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5821aceb-9d65-4e6f-940d-2a20117397f6-dns-svc\") pod \"dnsmasq-dns-666b6646f7-5485d\" (UID: \"5821aceb-9d65-4e6f-940d-2a20117397f6\") " pod="openstack/dnsmasq-dns-666b6646f7-5485d" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.425713 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-qwfp5"] Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.452916 
4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8sl4\" (UniqueName: \"kubernetes.io/projected/5821aceb-9d65-4e6f-940d-2a20117397f6-kube-api-access-g8sl4\") pod \"dnsmasq-dns-666b6646f7-5485d\" (UID: \"5821aceb-9d65-4e6f-940d-2a20117397f6\") " pod="openstack/dnsmasq-dns-666b6646f7-5485d" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.460666 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-5485d" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.476152 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-vkxg2"] Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.485267 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-vkxg2" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.490761 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-vkxg2"] Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.593045 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9zr6\" (UniqueName: \"kubernetes.io/projected/ee76bb27-a55c-48e8-a943-712c8f0036f4-kube-api-access-t9zr6\") pod \"dnsmasq-dns-57d769cc4f-vkxg2\" (UID: \"ee76bb27-a55c-48e8-a943-712c8f0036f4\") " pod="openstack/dnsmasq-dns-57d769cc4f-vkxg2" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.593826 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee76bb27-a55c-48e8-a943-712c8f0036f4-config\") pod \"dnsmasq-dns-57d769cc4f-vkxg2\" (UID: \"ee76bb27-a55c-48e8-a943-712c8f0036f4\") " pod="openstack/dnsmasq-dns-57d769cc4f-vkxg2" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.593925 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee76bb27-a55c-48e8-a943-712c8f0036f4-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-vkxg2\" (UID: \"ee76bb27-a55c-48e8-a943-712c8f0036f4\") " pod="openstack/dnsmasq-dns-57d769cc4f-vkxg2" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.695096 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee76bb27-a55c-48e8-a943-712c8f0036f4-config\") pod \"dnsmasq-dns-57d769cc4f-vkxg2\" (UID: \"ee76bb27-a55c-48e8-a943-712c8f0036f4\") " pod="openstack/dnsmasq-dns-57d769cc4f-vkxg2" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.695163 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee76bb27-a55c-48e8-a943-712c8f0036f4-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-vkxg2\" (UID: \"ee76bb27-a55c-48e8-a943-712c8f0036f4\") " pod="openstack/dnsmasq-dns-57d769cc4f-vkxg2" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.695225 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9zr6\" (UniqueName: \"kubernetes.io/projected/ee76bb27-a55c-48e8-a943-712c8f0036f4-kube-api-access-t9zr6\") pod \"dnsmasq-dns-57d769cc4f-vkxg2\" (UID: \"ee76bb27-a55c-48e8-a943-712c8f0036f4\") " pod="openstack/dnsmasq-dns-57d769cc4f-vkxg2" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.697660 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/ee76bb27-a55c-48e8-a943-712c8f0036f4-config\") pod \"dnsmasq-dns-57d769cc4f-vkxg2\" (UID: \"ee76bb27-a55c-48e8-a943-712c8f0036f4\") " pod="openstack/dnsmasq-dns-57d769cc4f-vkxg2" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.698793 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee76bb27-a55c-48e8-a943-712c8f0036f4-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-vkxg2\" (UID: \"ee76bb27-a55c-48e8-a943-712c8f0036f4\") " pod="openstack/dnsmasq-dns-57d769cc4f-vkxg2" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.718302 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9zr6\" (UniqueName: \"kubernetes.io/projected/ee76bb27-a55c-48e8-a943-712c8f0036f4-kube-api-access-t9zr6\") pod \"dnsmasq-dns-57d769cc4f-vkxg2\" (UID: \"ee76bb27-a55c-48e8-a943-712c8f0036f4\") " pod="openstack/dnsmasq-dns-57d769cc4f-vkxg2" Dec 11 08:35:21 crc kubenswrapper[4881]: I1211 08:35:21.829615 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-vkxg2" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.236888 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5485d"] Dec 11 08:35:22 crc kubenswrapper[4881]: W1211 08:35:22.242772 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5821aceb_9d65_4e6f_940d_2a20117397f6.slice/crio-deaa8d121fc24b0dee390ec6f33b93af21996dd0190bc540f1a4a92964bfdb1a WatchSource:0}: Error finding container deaa8d121fc24b0dee390ec6f33b93af21996dd0190bc540f1a4a92964bfdb1a: Status 404 returned error can't find the container with id deaa8d121fc24b0dee390ec6f33b93af21996dd0190bc540f1a4a92964bfdb1a Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.266011 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.272849 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.296741 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.297086 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-wcm7n" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.297093 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.297233 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.298102 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.298176 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.304310 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.309075 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.359893 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-vkxg2"] Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.415500 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ed841687-cd89-4419-8726-85e086a5cc21-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.415584 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-config-data\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.415607 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.415632 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.415678 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.415917 4881 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ed841687-cd89-4419-8726-85e086a5cc21-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.415986 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.416016 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.416081 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jt69\" (UniqueName: \"kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-kube-api-access-8jt69\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.416146 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.416170 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.517416 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.517477 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.517528 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jt69\" (UniqueName: \"kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-kube-api-access-8jt69\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.517565 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.517588 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.517636 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ed841687-cd89-4419-8726-85e086a5cc21-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.517722 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-config-data\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.517748 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.517777 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.517841 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.517894 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ed841687-cd89-4419-8726-85e086a5cc21-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.517979 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.518038 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 
08:35:22.518321 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.519470 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-config-data\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.520206 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.522980 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.525155 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ed841687-cd89-4419-8726-85e086a5cc21-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.529017 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.529068 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.544793 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jt69\" (UniqueName: \"kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-kube-api-access-8jt69\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.545205 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ed841687-cd89-4419-8726-85e086a5cc21-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.549323 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc 
kubenswrapper[4881]: I1211 08:35:22.581645 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.587873 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.589366 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.592188 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.592447 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.592553 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.592625 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.592904 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-67jkl" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.593037 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.593029 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.640992 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.720935 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.721084 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfpct\" (UniqueName: \"kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-kube-api-access-cfpct\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.721122 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/83d70e0f-d672-49c8-89d6-c1aa99c572a0-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.721143 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.721170 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.721195 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.721210 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.721233 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.721296 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-tls\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.721383 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.721437 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/83d70e0f-d672-49c8-89d6-c1aa99c572a0-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.822497 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.822547 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.822563 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.822591 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.822637 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.822666 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.822687 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/83d70e0f-d672-49c8-89d6-c1aa99c572a0-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.822707 4881 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.822756 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfpct\" (UniqueName: \"kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-kube-api-access-cfpct\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.822787 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/83d70e0f-d672-49c8-89d6-c1aa99c572a0-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.822803 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.823931 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.824177 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.824295 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.824379 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.828100 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.830264 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.835251 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.837081 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/83d70e0f-d672-49c8-89d6-c1aa99c572a0-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.838118 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.842023 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/83d70e0f-d672-49c8-89d6-c1aa99c572a0-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.842465 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfpct\" (UniqueName: \"kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-kube-api-access-cfpct\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.906074 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:22 crc kubenswrapper[4881]: I1211 08:35:22.935815 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:35:23 crc kubenswrapper[4881]: I1211 08:35:23.283971 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-5485d" event={"ID":"5821aceb-9d65-4e6f-940d-2a20117397f6","Type":"ContainerStarted","Data":"deaa8d121fc24b0dee390ec6f33b93af21996dd0190bc540f1a4a92964bfdb1a"} Dec 11 08:35:23 crc kubenswrapper[4881]: I1211 08:35:23.292638 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-vkxg2" event={"ID":"ee76bb27-a55c-48e8-a943-712c8f0036f4","Type":"ContainerStarted","Data":"08c3f264a3d428de49ff45df482764969a7a0852727698b0204cb3153fbed710"} Dec 11 08:35:23 crc kubenswrapper[4881]: I1211 08:35:23.346148 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 08:35:23 crc kubenswrapper[4881]: I1211 08:35:23.723254 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 08:35:23 crc kubenswrapper[4881]: W1211 08:35:23.725445 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83d70e0f_d672_49c8_89d6_c1aa99c572a0.slice/crio-3217862f1edf01c9a7059bdf56d484104266d33ecf8edab58de2c1065831171f WatchSource:0}: Error finding container 3217862f1edf01c9a7059bdf56d484104266d33ecf8edab58de2c1065831171f: Status 404 returned error can't find the container with id 3217862f1edf01c9a7059bdf56d484104266d33ecf8edab58de2c1065831171f Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.306562 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.314262 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ed841687-cd89-4419-8726-85e086a5cc21","Type":"ContainerStarted","Data":"ab949ebb5f98ec0bd36bca41b542132d17a18fbabaa3b31b787c099504b8a75e"} Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.314733 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.315438 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"83d70e0f-d672-49c8-89d6-c1aa99c572a0","Type":"ContainerStarted","Data":"3217862f1edf01c9a7059bdf56d484104266d33ecf8edab58de2c1065831171f"} Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.321294 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.326862 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.327447 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.327559 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.330124 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-2cd28" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.330853 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.368748 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.407482 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a825abb-23ec-4f51-940d-2500da233e14-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.407538 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a825abb-23ec-4f51-940d-2500da233e14-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.407617 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a825abb-23ec-4f51-940d-2500da233e14-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.407669 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7a825abb-23ec-4f51-940d-2500da233e14-config-data-default\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.407693 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/7a825abb-23ec-4f51-940d-2500da233e14-secrets\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.407773 4881 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.408434 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7a825abb-23ec-4f51-940d-2500da233e14-config-data-generated\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.408502 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7a825abb-23ec-4f51-940d-2500da233e14-kolla-config\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.408521 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnb7s\" (UniqueName: \"kubernetes.io/projected/7a825abb-23ec-4f51-940d-2500da233e14-kube-api-access-wnb7s\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.511490 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7a825abb-23ec-4f51-940d-2500da233e14-kolla-config\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.511531 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnb7s\" (UniqueName: \"kubernetes.io/projected/7a825abb-23ec-4f51-940d-2500da233e14-kube-api-access-wnb7s\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.511561 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a825abb-23ec-4f51-940d-2500da233e14-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.511582 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a825abb-23ec-4f51-940d-2500da233e14-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.511617 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a825abb-23ec-4f51-940d-2500da233e14-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.511982 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: 
\"kubernetes.io/configmap/7a825abb-23ec-4f51-940d-2500da233e14-config-data-default\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.512039 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/7a825abb-23ec-4f51-940d-2500da233e14-secrets\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.512168 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.512352 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7a825abb-23ec-4f51-940d-2500da233e14-config-data-generated\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.512793 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7a825abb-23ec-4f51-940d-2500da233e14-config-data-generated\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.512952 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7a825abb-23ec-4f51-940d-2500da233e14-kolla-config\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.513548 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7a825abb-23ec-4f51-940d-2500da233e14-config-data-default\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.513776 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.516649 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a825abb-23ec-4f51-940d-2500da233e14-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.521981 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/7a825abb-23ec-4f51-940d-2500da233e14-secrets\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.522109 4881 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a825abb-23ec-4f51-940d-2500da233e14-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.536699 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a825abb-23ec-4f51-940d-2500da233e14-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.542437 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnb7s\" (UniqueName: \"kubernetes.io/projected/7a825abb-23ec-4f51-940d-2500da233e14-kube-api-access-wnb7s\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.557887 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-galera-0\" (UID: \"7a825abb-23ec-4f51-940d-2500da233e14\") " pod="openstack/openstack-galera-0" Dec 11 08:35:24 crc kubenswrapper[4881]: I1211 08:35:24.704959 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.391515 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Dec 11 08:35:25 crc kubenswrapper[4881]: W1211 08:35:25.423729 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a825abb_23ec_4f51_940d_2500da233e14.slice/crio-c16b4e5af4a572370ea491c669e77ce9aab05087ce1af6275d24feee134fe3ea WatchSource:0}: Error finding container c16b4e5af4a572370ea491c669e77ce9aab05087ce1af6275d24feee134fe3ea: Status 404 returned error can't find the container with id c16b4e5af4a572370ea491c669e77ce9aab05087ce1af6275d24feee134fe3ea Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.452072 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.453916 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.455676 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-bhs2f" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.456438 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.456740 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.456894 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.465636 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.541794 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/70402eec-968d-4ceb-b259-5e2508ee21a0-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.541875 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/70402eec-968d-4ceb-b259-5e2508ee21a0-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.541893 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70402eec-968d-4ceb-b259-5e2508ee21a0-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.541929 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/70402eec-968d-4ceb-b259-5e2508ee21a0-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.541983 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/70402eec-968d-4ceb-b259-5e2508ee21a0-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.542025 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/70402eec-968d-4ceb-b259-5e2508ee21a0-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.542074 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.542128 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6n5xm\" (UniqueName: \"kubernetes.io/projected/70402eec-968d-4ceb-b259-5e2508ee21a0-kube-api-access-6n5xm\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.542155 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70402eec-968d-4ceb-b259-5e2508ee21a0-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.644030 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/70402eec-968d-4ceb-b259-5e2508ee21a0-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.644103 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.644161 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6n5xm\" (UniqueName: \"kubernetes.io/projected/70402eec-968d-4ceb-b259-5e2508ee21a0-kube-api-access-6n5xm\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.644202 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70402eec-968d-4ceb-b259-5e2508ee21a0-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.644251 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/70402eec-968d-4ceb-b259-5e2508ee21a0-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.644308 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/70402eec-968d-4ceb-b259-5e2508ee21a0-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.644348 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70402eec-968d-4ceb-b259-5e2508ee21a0-combined-ca-bundle\") pod 
\"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.644395 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/70402eec-968d-4ceb-b259-5e2508ee21a0-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.644446 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/70402eec-968d-4ceb-b259-5e2508ee21a0-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.644459 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.647206 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70402eec-968d-4ceb-b259-5e2508ee21a0-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.648187 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/70402eec-968d-4ceb-b259-5e2508ee21a0-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.648321 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/70402eec-968d-4ceb-b259-5e2508ee21a0-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.648557 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/70402eec-968d-4ceb-b259-5e2508ee21a0-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.658439 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70402eec-968d-4ceb-b259-5e2508ee21a0-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.661485 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/70402eec-968d-4ceb-b259-5e2508ee21a0-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc 
kubenswrapper[4881]: I1211 08:35:25.661980 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/70402eec-968d-4ceb-b259-5e2508ee21a0-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.674326 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6n5xm\" (UniqueName: \"kubernetes.io/projected/70402eec-968d-4ceb-b259-5e2508ee21a0-kube-api-access-6n5xm\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.693426 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"70402eec-968d-4ceb-b259-5e2508ee21a0\") " pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.775844 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.776997 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.781393 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-b5qh4" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.781563 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.781575 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.784115 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.793796 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.849823 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/06499ad1-4a0e-46e2-b0fa-7583b8958148-kolla-config\") pod \"memcached-0\" (UID: \"06499ad1-4a0e-46e2-b0fa-7583b8958148\") " pod="openstack/memcached-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.849873 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/06499ad1-4a0e-46e2-b0fa-7583b8958148-memcached-tls-certs\") pod \"memcached-0\" (UID: \"06499ad1-4a0e-46e2-b0fa-7583b8958148\") " pod="openstack/memcached-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.849949 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06499ad1-4a0e-46e2-b0fa-7583b8958148-combined-ca-bundle\") pod \"memcached-0\" (UID: \"06499ad1-4a0e-46e2-b0fa-7583b8958148\") " pod="openstack/memcached-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.850061 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/06499ad1-4a0e-46e2-b0fa-7583b8958148-config-data\") pod \"memcached-0\" (UID: \"06499ad1-4a0e-46e2-b0fa-7583b8958148\") " pod="openstack/memcached-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.850219 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffp6b\" (UniqueName: \"kubernetes.io/projected/06499ad1-4a0e-46e2-b0fa-7583b8958148-kube-api-access-ffp6b\") pod \"memcached-0\" (UID: \"06499ad1-4a0e-46e2-b0fa-7583b8958148\") " pod="openstack/memcached-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.961873 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/06499ad1-4a0e-46e2-b0fa-7583b8958148-kolla-config\") pod \"memcached-0\" (UID: \"06499ad1-4a0e-46e2-b0fa-7583b8958148\") " pod="openstack/memcached-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.961934 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/06499ad1-4a0e-46e2-b0fa-7583b8958148-memcached-tls-certs\") pod \"memcached-0\" (UID: \"06499ad1-4a0e-46e2-b0fa-7583b8958148\") " pod="openstack/memcached-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.962036 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06499ad1-4a0e-46e2-b0fa-7583b8958148-combined-ca-bundle\") pod \"memcached-0\" (UID: \"06499ad1-4a0e-46e2-b0fa-7583b8958148\") " pod="openstack/memcached-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.962088 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/06499ad1-4a0e-46e2-b0fa-7583b8958148-config-data\") pod \"memcached-0\" (UID: \"06499ad1-4a0e-46e2-b0fa-7583b8958148\") " pod="openstack/memcached-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.962150 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffp6b\" 
(UniqueName: \"kubernetes.io/projected/06499ad1-4a0e-46e2-b0fa-7583b8958148-kube-api-access-ffp6b\") pod \"memcached-0\" (UID: \"06499ad1-4a0e-46e2-b0fa-7583b8958148\") " pod="openstack/memcached-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.963519 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/06499ad1-4a0e-46e2-b0fa-7583b8958148-kolla-config\") pod \"memcached-0\" (UID: \"06499ad1-4a0e-46e2-b0fa-7583b8958148\") " pod="openstack/memcached-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.967044 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06499ad1-4a0e-46e2-b0fa-7583b8958148-combined-ca-bundle\") pod \"memcached-0\" (UID: \"06499ad1-4a0e-46e2-b0fa-7583b8958148\") " pod="openstack/memcached-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.967144 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/06499ad1-4a0e-46e2-b0fa-7583b8958148-config-data\") pod \"memcached-0\" (UID: \"06499ad1-4a0e-46e2-b0fa-7583b8958148\") " pod="openstack/memcached-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.975913 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/06499ad1-4a0e-46e2-b0fa-7583b8958148-memcached-tls-certs\") pod \"memcached-0\" (UID: \"06499ad1-4a0e-46e2-b0fa-7583b8958148\") " pod="openstack/memcached-0" Dec 11 08:35:25 crc kubenswrapper[4881]: I1211 08:35:25.997960 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffp6b\" (UniqueName: \"kubernetes.io/projected/06499ad1-4a0e-46e2-b0fa-7583b8958148-kube-api-access-ffp6b\") pod \"memcached-0\" (UID: \"06499ad1-4a0e-46e2-b0fa-7583b8958148\") " pod="openstack/memcached-0" Dec 11 08:35:26 crc kubenswrapper[4881]: I1211 08:35:26.105972 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Dec 11 08:35:26 crc kubenswrapper[4881]: I1211 08:35:26.371834 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7a825abb-23ec-4f51-940d-2500da233e14","Type":"ContainerStarted","Data":"c16b4e5af4a572370ea491c669e77ce9aab05087ce1af6275d24feee134fe3ea"} Dec 11 08:35:26 crc kubenswrapper[4881]: I1211 08:35:26.732784 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 11 08:35:26 crc kubenswrapper[4881]: I1211 08:35:26.853989 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Dec 11 08:35:27 crc kubenswrapper[4881]: I1211 08:35:27.445103 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"06499ad1-4a0e-46e2-b0fa-7583b8958148","Type":"ContainerStarted","Data":"65097c84ec5b444c5c7f33cac424ced01b13b213c06dd3a5f42502ced427cad5"} Dec 11 08:35:27 crc kubenswrapper[4881]: I1211 08:35:27.446629 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"70402eec-968d-4ceb-b259-5e2508ee21a0","Type":"ContainerStarted","Data":"5e4e2eda9e468f6313ee2ca95263821a4c3ac2ac7cedd66fcbe44c73ebbafdf9"} Dec 11 08:35:27 crc kubenswrapper[4881]: I1211 08:35:27.929055 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Dec 11 08:35:27 crc kubenswrapper[4881]: I1211 08:35:27.931725 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 11 08:35:27 crc kubenswrapper[4881]: I1211 08:35:27.942847 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 11 08:35:27 crc kubenswrapper[4881]: I1211 08:35:27.965799 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-ng9b8" Dec 11 08:35:28 crc kubenswrapper[4881]: I1211 08:35:28.029592 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8f96\" (UniqueName: \"kubernetes.io/projected/558f097f-277d-4824-bafc-28c4c0f139f3-kube-api-access-h8f96\") pod \"kube-state-metrics-0\" (UID: \"558f097f-277d-4824-bafc-28c4c0f139f3\") " pod="openstack/kube-state-metrics-0" Dec 11 08:35:28 crc kubenswrapper[4881]: I1211 08:35:28.133774 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8f96\" (UniqueName: \"kubernetes.io/projected/558f097f-277d-4824-bafc-28c4c0f139f3-kube-api-access-h8f96\") pod \"kube-state-metrics-0\" (UID: \"558f097f-277d-4824-bafc-28c4c0f139f3\") " pod="openstack/kube-state-metrics-0" Dec 11 08:35:28 crc kubenswrapper[4881]: I1211 08:35:28.168473 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8f96\" (UniqueName: \"kubernetes.io/projected/558f097f-277d-4824-bafc-28c4c0f139f3-kube-api-access-h8f96\") pod \"kube-state-metrics-0\" (UID: \"558f097f-277d-4824-bafc-28c4c0f139f3\") " pod="openstack/kube-state-metrics-0" Dec 11 08:35:28 crc kubenswrapper[4881]: I1211 08:35:28.309892 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 11 08:35:28 crc kubenswrapper[4881]: I1211 08:35:28.873503 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-qvsbb"] Dec 11 08:35:28 crc kubenswrapper[4881]: I1211 08:35:28.879060 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-qvsbb" Dec 11 08:35:28 crc kubenswrapper[4881]: I1211 08:35:28.883931 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-l4lnz" Dec 11 08:35:28 crc kubenswrapper[4881]: I1211 08:35:28.884115 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards" Dec 11 08:35:28 crc kubenswrapper[4881]: I1211 08:35:28.901410 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-qvsbb"] Dec 11 08:35:28 crc kubenswrapper[4881]: I1211 08:35:28.949614 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c5fa886-8b43-4ef2-9f4b-d4724c4efa56-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-qvsbb\" (UID: \"3c5fa886-8b43-4ef2-9f4b-d4724c4efa56\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-qvsbb" Dec 11 08:35:28 crc kubenswrapper[4881]: I1211 08:35:28.949740 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nssf\" (UniqueName: \"kubernetes.io/projected/3c5fa886-8b43-4ef2-9f4b-d4724c4efa56-kube-api-access-8nssf\") pod \"observability-ui-dashboards-7d5fb4cbfb-qvsbb\" (UID: \"3c5fa886-8b43-4ef2-9f4b-d4724c4efa56\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-qvsbb" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.057663 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c5fa886-8b43-4ef2-9f4b-d4724c4efa56-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-qvsbb\" (UID: \"3c5fa886-8b43-4ef2-9f4b-d4724c4efa56\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-qvsbb" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.058974 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nssf\" (UniqueName: \"kubernetes.io/projected/3c5fa886-8b43-4ef2-9f4b-d4724c4efa56-kube-api-access-8nssf\") pod \"observability-ui-dashboards-7d5fb4cbfb-qvsbb\" (UID: \"3c5fa886-8b43-4ef2-9f4b-d4724c4efa56\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-qvsbb" Dec 11 08:35:29 crc kubenswrapper[4881]: E1211 08:35:29.058814 4881 secret.go:188] Couldn't get secret openshift-operators/observability-ui-dashboards: secret "observability-ui-dashboards" not found Dec 11 08:35:29 crc kubenswrapper[4881]: E1211 08:35:29.060281 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3c5fa886-8b43-4ef2-9f4b-d4724c4efa56-serving-cert podName:3c5fa886-8b43-4ef2-9f4b-d4724c4efa56 nodeName:}" failed. No retries permitted until 2025-12-11 08:35:29.560239631 +0000 UTC m=+1177.937608328 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/3c5fa886-8b43-4ef2-9f4b-d4724c4efa56-serving-cert") pod "observability-ui-dashboards-7d5fb4cbfb-qvsbb" (UID: "3c5fa886-8b43-4ef2-9f4b-d4724c4efa56") : secret "observability-ui-dashboards" not found Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.139636 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nssf\" (UniqueName: \"kubernetes.io/projected/3c5fa886-8b43-4ef2-9f4b-d4724c4efa56-kube-api-access-8nssf\") pod \"observability-ui-dashboards-7d5fb4cbfb-qvsbb\" (UID: \"3c5fa886-8b43-4ef2-9f4b-d4724c4efa56\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-qvsbb" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.427634 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.430121 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.445444 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.445751 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-nb9sv" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.452579 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.452734 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.453053 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.461202 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.477512 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.527000 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-5669cc64cd-8h7sh"] Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.541251 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.608556 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.608596 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-config\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.608617 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/1206112a-8438-4dcf-9cad-a3e38790a344-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.608715 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/1206112a-8438-4dcf-9cad-a3e38790a344-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.608874 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.608916 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/1206112a-8438-4dcf-9cad-a3e38790a344-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.608954 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmn5j\" (UniqueName: \"kubernetes.io/projected/1206112a-8438-4dcf-9cad-a3e38790a344-kube-api-access-rmn5j\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.608992 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c5fa886-8b43-4ef2-9f4b-d4724c4efa56-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-qvsbb\" (UID: \"3c5fa886-8b43-4ef2-9f4b-d4724c4efa56\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-qvsbb" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.609118 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.631154 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c5fa886-8b43-4ef2-9f4b-d4724c4efa56-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-qvsbb\" (UID: \"3c5fa886-8b43-4ef2-9f4b-d4724c4efa56\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-qvsbb" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.661745 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5669cc64cd-8h7sh"] Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.711501 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sg7nb\" (UniqueName: \"kubernetes.io/projected/07642cdb-f925-469e-bb1c-7a06fb4e316e-kube-api-access-sg7nb\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.711646 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.711667 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-config\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.711684 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/1206112a-8438-4dcf-9cad-a3e38790a344-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.711767 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/07642cdb-f925-469e-bb1c-7a06fb4e316e-console-config\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.711788 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/1206112a-8438-4dcf-9cad-a3e38790a344-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.711856 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: 
\"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.711873 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/1206112a-8438-4dcf-9cad-a3e38790a344-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.711892 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmn5j\" (UniqueName: \"kubernetes.io/projected/1206112a-8438-4dcf-9cad-a3e38790a344-kube-api-access-rmn5j\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.711928 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/07642cdb-f925-469e-bb1c-7a06fb4e316e-oauth-serving-cert\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.711974 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.711989 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/07642cdb-f925-469e-bb1c-7a06fb4e316e-console-oauth-config\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.712013 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/07642cdb-f925-469e-bb1c-7a06fb4e316e-trusted-ca-bundle\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.712059 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/07642cdb-f925-469e-bb1c-7a06fb4e316e-service-ca\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.712080 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/07642cdb-f925-469e-bb1c-7a06fb4e316e-console-serving-cert\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.713727 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: 
\"kubernetes.io/configmap/1206112a-8438-4dcf-9cad-a3e38790a344-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.715068 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.716981 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.721624 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/1206112a-8438-4dcf-9cad-a3e38790a344-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.730797 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-config\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.735229 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.737368 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/1206112a-8438-4dcf-9cad-a3e38790a344-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.741516 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmn5j\" (UniqueName: \"kubernetes.io/projected/1206112a-8438-4dcf-9cad-a3e38790a344-kube-api-access-rmn5j\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.781014 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"prometheus-metric-storage-0\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.813662 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sg7nb\" (UniqueName: \"kubernetes.io/projected/07642cdb-f925-469e-bb1c-7a06fb4e316e-kube-api-access-sg7nb\") pod 
\"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.813768 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/07642cdb-f925-469e-bb1c-7a06fb4e316e-console-config\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.813829 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/07642cdb-f925-469e-bb1c-7a06fb4e316e-oauth-serving-cert\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.813869 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/07642cdb-f925-469e-bb1c-7a06fb4e316e-console-oauth-config\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.813891 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/07642cdb-f925-469e-bb1c-7a06fb4e316e-trusted-ca-bundle\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.813935 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/07642cdb-f925-469e-bb1c-7a06fb4e316e-service-ca\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.813957 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/07642cdb-f925-469e-bb1c-7a06fb4e316e-console-serving-cert\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.815957 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/07642cdb-f925-469e-bb1c-7a06fb4e316e-service-ca\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.816387 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/07642cdb-f925-469e-bb1c-7a06fb4e316e-console-config\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.817456 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/07642cdb-f925-469e-bb1c-7a06fb4e316e-trusted-ca-bundle\") pod \"console-5669cc64cd-8h7sh\" (UID: 
\"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.817940 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/07642cdb-f925-469e-bb1c-7a06fb4e316e-console-serving-cert\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.818271 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/07642cdb-f925-469e-bb1c-7a06fb4e316e-oauth-serving-cert\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.828307 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-qvsbb" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.829245 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/07642cdb-f925-469e-bb1c-7a06fb4e316e-console-oauth-config\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.835649 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sg7nb\" (UniqueName: \"kubernetes.io/projected/07642cdb-f925-469e-bb1c-7a06fb4e316e-kube-api-access-sg7nb\") pod \"console-5669cc64cd-8h7sh\" (UID: \"07642cdb-f925-469e-bb1c-7a06fb4e316e\") " pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:29 crc kubenswrapper[4881]: I1211 08:35:29.976124 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:30 crc kubenswrapper[4881]: I1211 08:35:30.085098 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.257957 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.263170 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.266284 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.266327 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.266571 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.266988 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-db5zr" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.268484 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.289982 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.348850 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.348991 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-config\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.349060 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.349108 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.349141 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6l5f6\" (UniqueName: \"kubernetes.io/projected/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-kube-api-access-6l5f6\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.350653 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.350707 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.350747 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.452565 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.452699 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-config\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.452757 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.452809 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.452834 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6l5f6\" (UniqueName: \"kubernetes.io/projected/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-kube-api-access-6l5f6\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.452891 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.452916 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.452984 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 
08:35:31.453621 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.453660 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.454058 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-config\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.454140 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.460505 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.460650 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.463240 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.478569 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6l5f6\" (UniqueName: \"kubernetes.io/projected/7f0aa090-3aac-4da8-9efa-a31a7b3b130f-kube-api-access-6l5f6\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.489183 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"7f0aa090-3aac-4da8-9efa-a31a7b3b130f\") " pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:31 crc kubenswrapper[4881]: I1211 08:35:31.587889 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.824171 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-xfltd"] Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.825502 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.830107 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.830253 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-8p8rp" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.830404 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.836827 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-5gqh8"] Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.839057 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.845799 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-xfltd"] Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.885609 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gwsw\" (UniqueName: \"kubernetes.io/projected/49d6015f-9f76-4e77-821e-2a11887e497c-kube-api-access-5gwsw\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.885737 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/49d6015f-9f76-4e77-821e-2a11887e497c-var-log-ovn\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.885766 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ede1ec9d-4207-4a9c-ba57-3f2037f68632-var-log\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.885796 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/49d6015f-9f76-4e77-821e-2a11887e497c-var-run\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.885822 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ede1ec9d-4207-4a9c-ba57-3f2037f68632-var-run\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.885849 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xz7kg\" (UniqueName: 
\"kubernetes.io/projected/ede1ec9d-4207-4a9c-ba57-3f2037f68632-kube-api-access-xz7kg\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.885911 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/49d6015f-9f76-4e77-821e-2a11887e497c-scripts\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.885928 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49d6015f-9f76-4e77-821e-2a11887e497c-combined-ca-bundle\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.886034 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d6015f-9f76-4e77-821e-2a11887e497c-ovn-controller-tls-certs\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.886160 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ede1ec9d-4207-4a9c-ba57-3f2037f68632-var-lib\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.886186 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ede1ec9d-4207-4a9c-ba57-3f2037f68632-scripts\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.886209 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/49d6015f-9f76-4e77-821e-2a11887e497c-var-run-ovn\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.886249 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ede1ec9d-4207-4a9c-ba57-3f2037f68632-etc-ovs\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.925284 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-5gqh8"] Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.990542 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/49d6015f-9f76-4e77-821e-2a11887e497c-scripts\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.990597 4881 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49d6015f-9f76-4e77-821e-2a11887e497c-combined-ca-bundle\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.990625 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d6015f-9f76-4e77-821e-2a11887e497c-ovn-controller-tls-certs\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.990677 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ede1ec9d-4207-4a9c-ba57-3f2037f68632-var-lib\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.990697 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ede1ec9d-4207-4a9c-ba57-3f2037f68632-scripts\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.990713 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/49d6015f-9f76-4e77-821e-2a11887e497c-var-run-ovn\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.990741 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ede1ec9d-4207-4a9c-ba57-3f2037f68632-etc-ovs\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.990784 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gwsw\" (UniqueName: \"kubernetes.io/projected/49d6015f-9f76-4e77-821e-2a11887e497c-kube-api-access-5gwsw\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.990851 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ede1ec9d-4207-4a9c-ba57-3f2037f68632-var-log\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.990872 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/49d6015f-9f76-4e77-821e-2a11887e497c-var-log-ovn\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.990903 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/49d6015f-9f76-4e77-821e-2a11887e497c-var-run\") pod \"ovn-controller-xfltd\" (UID: 
\"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.990930 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ede1ec9d-4207-4a9c-ba57-3f2037f68632-var-run\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.990960 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xz7kg\" (UniqueName: \"kubernetes.io/projected/ede1ec9d-4207-4a9c-ba57-3f2037f68632-kube-api-access-xz7kg\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.991358 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ede1ec9d-4207-4a9c-ba57-3f2037f68632-etc-ovs\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.991480 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ede1ec9d-4207-4a9c-ba57-3f2037f68632-var-lib\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.991533 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/49d6015f-9f76-4e77-821e-2a11887e497c-var-run\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.991543 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/49d6015f-9f76-4e77-821e-2a11887e497c-var-log-ovn\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.991360 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/49d6015f-9f76-4e77-821e-2a11887e497c-var-run-ovn\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.991573 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ede1ec9d-4207-4a9c-ba57-3f2037f68632-var-run\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:32 crc kubenswrapper[4881]: I1211 08:35:32.991632 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ede1ec9d-4207-4a9c-ba57-3f2037f68632-var-log\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:33 crc kubenswrapper[4881]: I1211 08:35:33.989993 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/49d6015f-9f76-4e77-821e-2a11887e497c-scripts\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:33 crc kubenswrapper[4881]: I1211 08:35:33.993723 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d6015f-9f76-4e77-821e-2a11887e497c-ovn-controller-tls-certs\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:33 crc kubenswrapper[4881]: I1211 08:35:33.995784 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ede1ec9d-4207-4a9c-ba57-3f2037f68632-scripts\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:33.999162 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gwsw\" (UniqueName: \"kubernetes.io/projected/49d6015f-9f76-4e77-821e-2a11887e497c-kube-api-access-5gwsw\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:33.999647 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xz7kg\" (UniqueName: \"kubernetes.io/projected/ede1ec9d-4207-4a9c-ba57-3f2037f68632-kube-api-access-xz7kg\") pod \"ovn-controller-ovs-5gqh8\" (UID: \"ede1ec9d-4207-4a9c-ba57-3f2037f68632\") " pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.002105 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49d6015f-9f76-4e77-821e-2a11887e497c-combined-ca-bundle\") pod \"ovn-controller-xfltd\" (UID: \"49d6015f-9f76-4e77-821e-2a11887e497c\") " pod="openstack/ovn-controller-xfltd" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.288246 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-xfltd" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.293437 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-5gqh8" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.814661 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.817447 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.825562 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.825708 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.825834 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-mrwv4" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.826019 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.830945 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.926810 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b9b67c1c-0e11-4c19-8d1f-6c046375659c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.927015 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bx5dr\" (UniqueName: \"kubernetes.io/projected/b9b67c1c-0e11-4c19-8d1f-6c046375659c-kube-api-access-bx5dr\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.927057 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9b67c1c-0e11-4c19-8d1f-6c046375659c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.927144 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9b67c1c-0e11-4c19-8d1f-6c046375659c-config\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.927305 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9b67c1c-0e11-4c19-8d1f-6c046375659c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.927381 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.927682 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9b67c1c-0e11-4c19-8d1f-6c046375659c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: 
\"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:34 crc kubenswrapper[4881]: I1211 08:35:34.927749 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b9b67c1c-0e11-4c19-8d1f-6c046375659c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.029740 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9b67c1c-0e11-4c19-8d1f-6c046375659c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.029793 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b9b67c1c-0e11-4c19-8d1f-6c046375659c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.029861 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b9b67c1c-0e11-4c19-8d1f-6c046375659c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.029897 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bx5dr\" (UniqueName: \"kubernetes.io/projected/b9b67c1c-0e11-4c19-8d1f-6c046375659c-kube-api-access-bx5dr\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.029919 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9b67c1c-0e11-4c19-8d1f-6c046375659c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.029936 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9b67c1c-0e11-4c19-8d1f-6c046375659c-config\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.029960 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9b67c1c-0e11-4c19-8d1f-6c046375659c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.029983 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.030300 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.031691 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9b67c1c-0e11-4c19-8d1f-6c046375659c-config\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.031811 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b9b67c1c-0e11-4c19-8d1f-6c046375659c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.032324 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b9b67c1c-0e11-4c19-8d1f-6c046375659c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.035567 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9b67c1c-0e11-4c19-8d1f-6c046375659c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.039388 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9b67c1c-0e11-4c19-8d1f-6c046375659c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.045643 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9b67c1c-0e11-4c19-8d1f-6c046375659c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.049786 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bx5dr\" (UniqueName: \"kubernetes.io/projected/b9b67c1c-0e11-4c19-8d1f-6c046375659c-kube-api-access-bx5dr\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.057054 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"b9b67c1c-0e11-4c19-8d1f-6c046375659c\") " pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:35 crc kubenswrapper[4881]: I1211 08:35:35.151574 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Dec 11 08:35:47 crc kubenswrapper[4881]: E1211 08:35:47.838989 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Dec 11 08:35:47 crc kubenswrapper[4881]: E1211 08:35:47.839903 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:DB_ROOT_PASSWORD,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:DbRootPassword,Optional:nil,},},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:secrets,ReadOnly:true,MountPath:/var/lib/secrets,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wnb7s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(7a825abb-23ec-4f51-940d-2500da233e14): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:35:47 crc kubenswrapper[4881]: E1211 08:35:47.841211 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="7a825abb-23ec-4f51-940d-2500da233e14" Dec 11 08:35:48 crc kubenswrapper[4881]: E1211 08:35:48.715148 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="7a825abb-23ec-4f51-940d-2500da233e14" Dec 11 08:35:48 crc kubenswrapper[4881]: I1211 08:35:48.886630 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 11 08:35:49 crc kubenswrapper[4881]: E1211 08:35:49.898151 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 11 08:35:49 crc kubenswrapper[4881]: E1211 08:35:49.898418 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dbd9w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-qwfp5_openstack(d33da677-6c43-4e7e-87de-648d14ae9a8d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:35:49 crc kubenswrapper[4881]: E1211 08:35:49.899972 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-qwfp5" podUID="d33da677-6c43-4e7e-87de-648d14ae9a8d" Dec 11 08:35:49 crc kubenswrapper[4881]: W1211 08:35:49.900577 4881 manager.go:1169] Failed to process watch 
event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7f0aa090_3aac_4da8_9efa_a31a7b3b130f.slice/crio-f4e6088d420d59079a6f4d14500f0dc672c237b6dbec54f2d7b4072afbb69f6f WatchSource:0}: Error finding container f4e6088d420d59079a6f4d14500f0dc672c237b6dbec54f2d7b4072afbb69f6f: Status 404 returned error can't find the container with id f4e6088d420d59079a6f4d14500f0dc672c237b6dbec54f2d7b4072afbb69f6f Dec 11 08:35:49 crc kubenswrapper[4881]: E1211 08:35:49.936728 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 11 08:35:49 crc kubenswrapper[4881]: E1211 08:35:49.936887 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t9zr6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-vkxg2_openstack(ee76bb27-a55c-48e8-a943-712c8f0036f4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:35:49 crc kubenswrapper[4881]: E1211 08:35:49.938666 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-vkxg2" podUID="ee76bb27-a55c-48e8-a943-712c8f0036f4" Dec 11 08:35:49 crc kubenswrapper[4881]: E1211 08:35:49.939449 4881 log.go:32] 
"PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 11 08:35:49 crc kubenswrapper[4881]: E1211 08:35:49.939564 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g8sl4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-5485d_openstack(5821aceb-9d65-4e6f-940d-2a20117397f6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:35:49 crc kubenswrapper[4881]: E1211 08:35:49.940809 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-5485d" podUID="5821aceb-9d65-4e6f-940d-2a20117397f6" Dec 11 08:35:49 crc kubenswrapper[4881]: E1211 08:35:49.963309 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 11 08:35:49 crc kubenswrapper[4881]: E1211 08:35:49.963665 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d 
--hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mqcl7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-wkdgt_openstack(1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:35:49 crc kubenswrapper[4881]: E1211 08:35:49.965514 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-wkdgt" podUID="1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a" Dec 11 08:35:50 crc kubenswrapper[4881]: I1211 08:35:50.347329 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-qvsbb"] Dec 11 08:35:50 crc kubenswrapper[4881]: W1211 08:35:50.352154 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c5fa886_8b43_4ef2_9f4b_d4724c4efa56.slice/crio-829c78c0309eb8e834b4dd15e03058468cc3326fb776b2b0f2c0a372e9e89bff WatchSource:0}: Error finding container 829c78c0309eb8e834b4dd15e03058468cc3326fb776b2b0f2c0a372e9e89bff: Status 404 returned error can't find the container with id 829c78c0309eb8e834b4dd15e03058468cc3326fb776b2b0f2c0a372e9e89bff Dec 11 08:35:50 crc kubenswrapper[4881]: I1211 08:35:50.739678 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"7f0aa090-3aac-4da8-9efa-a31a7b3b130f","Type":"ContainerStarted","Data":"f4e6088d420d59079a6f4d14500f0dc672c237b6dbec54f2d7b4072afbb69f6f"} Dec 11 08:35:50 crc kubenswrapper[4881]: I1211 08:35:50.741492 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" 
event={"ID":"70402eec-968d-4ceb-b259-5e2508ee21a0","Type":"ContainerStarted","Data":"23f5f75747060572293a78abea8b3fef17b14ced3f7b03ac261e600dd62b4dac"} Dec 11 08:35:50 crc kubenswrapper[4881]: I1211 08:35:50.745184 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"06499ad1-4a0e-46e2-b0fa-7583b8958148","Type":"ContainerStarted","Data":"405f9e1ae69c97457ee73fa63842ec2451d2eec34f5e56768e9929964b5419b1"} Dec 11 08:35:50 crc kubenswrapper[4881]: I1211 08:35:50.745369 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Dec 11 08:35:50 crc kubenswrapper[4881]: I1211 08:35:50.747660 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-qvsbb" event={"ID":"3c5fa886-8b43-4ef2-9f4b-d4724c4efa56","Type":"ContainerStarted","Data":"829c78c0309eb8e834b4dd15e03058468cc3326fb776b2b0f2c0a372e9e89bff"} Dec 11 08:35:50 crc kubenswrapper[4881]: E1211 08:35:50.748546 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-vkxg2" podUID="ee76bb27-a55c-48e8-a943-712c8f0036f4" Dec 11 08:35:50 crc kubenswrapper[4881]: E1211 08:35:50.749573 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-5485d" podUID="5821aceb-9d65-4e6f-940d-2a20117397f6" Dec 11 08:35:50 crc kubenswrapper[4881]: I1211 08:35:50.809139 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=2.6492588059999997 podStartE2EDuration="25.809112552s" podCreationTimestamp="2025-12-11 08:35:25 +0000 UTC" firstStartedPulling="2025-12-11 08:35:26.883996431 +0000 UTC m=+1175.261365118" lastFinishedPulling="2025-12-11 08:35:50.043850157 +0000 UTC m=+1198.421218864" observedRunningTime="2025-12-11 08:35:50.794865415 +0000 UTC m=+1199.172234122" watchObservedRunningTime="2025-12-11 08:35:50.809112552 +0000 UTC m=+1199.186481249" Dec 11 08:35:50 crc kubenswrapper[4881]: I1211 08:35:50.847658 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.000108 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 11 08:35:51 crc kubenswrapper[4881]: W1211 08:35:51.006505 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod558f097f_277d_4824_bafc_28c4c0f139f3.slice/crio-be56e0bea3e809c928a527716966f23b675b617e5c19e67a00151d075ae6f19b WatchSource:0}: Error finding container be56e0bea3e809c928a527716966f23b675b617e5c19e67a00151d075ae6f19b: Status 404 returned error can't find the container with id be56e0bea3e809c928a527716966f23b675b617e5c19e67a00151d075ae6f19b Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.039144 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-xfltd"] Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.048542 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 11 08:35:51 crc 
kubenswrapper[4881]: W1211 08:35:51.100390 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49d6015f_9f76_4e77_821e_2a11887e497c.slice/crio-c986028e5518fd939ddbc52860c9c345092464e46f4e64c6d3e605cc110b5f72 WatchSource:0}: Error finding container c986028e5518fd939ddbc52860c9c345092464e46f4e64c6d3e605cc110b5f72: Status 404 returned error can't find the container with id c986028e5518fd939ddbc52860c9c345092464e46f4e64c6d3e605cc110b5f72 Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.416027 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5669cc64cd-8h7sh"] Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.570389 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-wkdgt" Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.587597 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-5gqh8"] Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.635937 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqcl7\" (UniqueName: \"kubernetes.io/projected/1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a-kube-api-access-mqcl7\") pod \"1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a\" (UID: \"1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a\") " Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.636096 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a-config\") pod \"1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a\" (UID: \"1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a\") " Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.641655 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a-config" (OuterVolumeSpecName: "config") pod "1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a" (UID: "1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:35:51 crc kubenswrapper[4881]: W1211 08:35:51.648001 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podede1ec9d_4207_4a9c_ba57_3f2037f68632.slice/crio-c33f4d49da2361c428790cd7618164449c52586390a80dbd4e70f1c6119b77ee WatchSource:0}: Error finding container c33f4d49da2361c428790cd7618164449c52586390a80dbd4e70f1c6119b77ee: Status 404 returned error can't find the container with id c33f4d49da2361c428790cd7618164449c52586390a80dbd4e70f1c6119b77ee Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.651673 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a-kube-api-access-mqcl7" (OuterVolumeSpecName: "kube-api-access-mqcl7") pod "1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a" (UID: "1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a"). InnerVolumeSpecName "kube-api-access-mqcl7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.676629 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-qwfp5" Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.737464 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbd9w\" (UniqueName: \"kubernetes.io/projected/d33da677-6c43-4e7e-87de-648d14ae9a8d-kube-api-access-dbd9w\") pod \"d33da677-6c43-4e7e-87de-648d14ae9a8d\" (UID: \"d33da677-6c43-4e7e-87de-648d14ae9a8d\") " Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.737592 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d33da677-6c43-4e7e-87de-648d14ae9a8d-config\") pod \"d33da677-6c43-4e7e-87de-648d14ae9a8d\" (UID: \"d33da677-6c43-4e7e-87de-648d14ae9a8d\") " Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.737785 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d33da677-6c43-4e7e-87de-648d14ae9a8d-dns-svc\") pod \"d33da677-6c43-4e7e-87de-648d14ae9a8d\" (UID: \"d33da677-6c43-4e7e-87de-648d14ae9a8d\") " Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.738026 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d33da677-6c43-4e7e-87de-648d14ae9a8d-config" (OuterVolumeSpecName: "config") pod "d33da677-6c43-4e7e-87de-648d14ae9a8d" (UID: "d33da677-6c43-4e7e-87de-648d14ae9a8d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.738204 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqcl7\" (UniqueName: \"kubernetes.io/projected/1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a-kube-api-access-mqcl7\") on node \"crc\" DevicePath \"\"" Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.738220 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.738231 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d33da677-6c43-4e7e-87de-648d14ae9a8d-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.738415 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d33da677-6c43-4e7e-87de-648d14ae9a8d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d33da677-6c43-4e7e-87de-648d14ae9a8d" (UID: "d33da677-6c43-4e7e-87de-648d14ae9a8d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.742500 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d33da677-6c43-4e7e-87de-648d14ae9a8d-kube-api-access-dbd9w" (OuterVolumeSpecName: "kube-api-access-dbd9w") pod "d33da677-6c43-4e7e-87de-648d14ae9a8d" (UID: "d33da677-6c43-4e7e-87de-648d14ae9a8d"). InnerVolumeSpecName "kube-api-access-dbd9w". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.760292 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ed841687-cd89-4419-8726-85e086a5cc21","Type":"ContainerStarted","Data":"080f4092c28abc98f3cd45e00f973d275bf68faf0a43fb8d189c7bc8955176a2"} Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.762680 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-5gqh8" event={"ID":"ede1ec9d-4207-4a9c-ba57-3f2037f68632","Type":"ContainerStarted","Data":"c33f4d49da2361c428790cd7618164449c52586390a80dbd4e70f1c6119b77ee"} Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.763706 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1206112a-8438-4dcf-9cad-a3e38790a344","Type":"ContainerStarted","Data":"18c790be2f3607b53bfd255854e482425c9db1653791bde5a4363dc5669d3d3b"} Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.766149 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xfltd" event={"ID":"49d6015f-9f76-4e77-821e-2a11887e497c","Type":"ContainerStarted","Data":"c986028e5518fd939ddbc52860c9c345092464e46f4e64c6d3e605cc110b5f72"} Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.768252 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-qwfp5" event={"ID":"d33da677-6c43-4e7e-87de-648d14ae9a8d","Type":"ContainerDied","Data":"daebff3496f4adc4f453839efdd40c33e1b72a2127d7e5a75a9322c3911389cd"} Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.768388 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-qwfp5" Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.779775 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"b9b67c1c-0e11-4c19-8d1f-6c046375659c","Type":"ContainerStarted","Data":"daf41c3a5ad943084bb0425bef5f300288c879c21661369c1a3e2a297609b3c1"} Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.790197 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"83d70e0f-d672-49c8-89d6-c1aa99c572a0","Type":"ContainerStarted","Data":"fad51349caab58607ba58815c5f2340b3cd292421c0d6836f33b9f343fcadef1"} Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.796993 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5669cc64cd-8h7sh" event={"ID":"07642cdb-f925-469e-bb1c-7a06fb4e316e","Type":"ContainerStarted","Data":"c36dcbe134a4658f3cc114c9300c40629aae16e1914591073b5c19fae1089c20"} Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.797043 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5669cc64cd-8h7sh" event={"ID":"07642cdb-f925-469e-bb1c-7a06fb4e316e","Type":"ContainerStarted","Data":"2808dd9bb41f3f92c0e27c75df7b3b0bce3cc2d3722e9c1eade0ece0f562f44a"} Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.799999 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"558f097f-277d-4824-bafc-28c4c0f139f3","Type":"ContainerStarted","Data":"be56e0bea3e809c928a527716966f23b675b617e5c19e67a00151d075ae6f19b"} Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.806725 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-wkdgt" Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.811094 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-wkdgt" event={"ID":"1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a","Type":"ContainerDied","Data":"0e84f9ae82630896561c505b76187cfb529226b2b06ef517f6ec8173630b0548"} Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.840144 4881 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d33da677-6c43-4e7e-87de-648d14ae9a8d-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.840188 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbd9w\" (UniqueName: \"kubernetes.io/projected/d33da677-6c43-4e7e-87de-648d14ae9a8d-kube-api-access-dbd9w\") on node \"crc\" DevicePath \"\"" Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.882301 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-qwfp5"] Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.919184 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-qwfp5"] Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.920081 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-5669cc64cd-8h7sh" podStartSLOduration=22.920065093 podStartE2EDuration="22.920065093s" podCreationTimestamp="2025-12-11 08:35:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:35:51.907831897 +0000 UTC m=+1200.285200594" watchObservedRunningTime="2025-12-11 08:35:51.920065093 +0000 UTC m=+1200.297433780" Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.962961 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-wkdgt"] Dec 11 08:35:51 crc kubenswrapper[4881]: I1211 08:35:51.973358 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-wkdgt"] Dec 11 08:35:53 crc kubenswrapper[4881]: I1211 08:35:53.053257 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a" path="/var/lib/kubelet/pods/1e6bcde5-0508-48b6-bb9e-9a2cb7c5fa3a/volumes" Dec 11 08:35:53 crc kubenswrapper[4881]: I1211 08:35:53.054108 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d33da677-6c43-4e7e-87de-648d14ae9a8d" path="/var/lib/kubelet/pods/d33da677-6c43-4e7e-87de-648d14ae9a8d/volumes" Dec 11 08:35:54 crc kubenswrapper[4881]: I1211 08:35:54.838294 4881 generic.go:334] "Generic (PLEG): container finished" podID="70402eec-968d-4ceb-b259-5e2508ee21a0" containerID="23f5f75747060572293a78abea8b3fef17b14ced3f7b03ac261e600dd62b4dac" exitCode=0 Dec 11 08:35:54 crc kubenswrapper[4881]: I1211 08:35:54.838474 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"70402eec-968d-4ceb-b259-5e2508ee21a0","Type":"ContainerDied","Data":"23f5f75747060572293a78abea8b3fef17b14ced3f7b03ac261e600dd62b4dac"} Dec 11 08:35:56 crc kubenswrapper[4881]: I1211 08:35:56.109619 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Dec 11 08:35:58 crc kubenswrapper[4881]: I1211 08:35:58.336387 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-vkxg2"] Dec 11 
08:35:58 crc kubenswrapper[4881]: I1211 08:35:58.383526 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-5tjbs"] Dec 11 08:35:58 crc kubenswrapper[4881]: I1211 08:35:58.385562 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" Dec 11 08:35:58 crc kubenswrapper[4881]: I1211 08:35:58.413492 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-5tjbs"] Dec 11 08:35:58 crc kubenswrapper[4881]: I1211 08:35:58.498613 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-5tjbs\" (UID: \"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d\") " pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" Dec 11 08:35:58 crc kubenswrapper[4881]: I1211 08:35:58.498824 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-config\") pod \"dnsmasq-dns-7cb5889db5-5tjbs\" (UID: \"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d\") " pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" Dec 11 08:35:58 crc kubenswrapper[4881]: I1211 08:35:58.498922 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9slf\" (UniqueName: \"kubernetes.io/projected/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-kube-api-access-g9slf\") pod \"dnsmasq-dns-7cb5889db5-5tjbs\" (UID: \"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d\") " pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" Dec 11 08:35:58 crc kubenswrapper[4881]: I1211 08:35:58.600828 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-config\") pod \"dnsmasq-dns-7cb5889db5-5tjbs\" (UID: \"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d\") " pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" Dec 11 08:35:58 crc kubenswrapper[4881]: I1211 08:35:58.600941 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9slf\" (UniqueName: \"kubernetes.io/projected/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-kube-api-access-g9slf\") pod \"dnsmasq-dns-7cb5889db5-5tjbs\" (UID: \"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d\") " pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" Dec 11 08:35:58 crc kubenswrapper[4881]: I1211 08:35:58.601269 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-5tjbs\" (UID: \"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d\") " pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" Dec 11 08:35:58 crc kubenswrapper[4881]: I1211 08:35:58.602255 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-5tjbs\" (UID: \"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d\") " pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" Dec 11 08:35:58 crc kubenswrapper[4881]: I1211 08:35:58.602287 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-config\") pod \"dnsmasq-dns-7cb5889db5-5tjbs\" (UID: \"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d\") " 
pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" Dec 11 08:35:58 crc kubenswrapper[4881]: I1211 08:35:58.622941 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9slf\" (UniqueName: \"kubernetes.io/projected/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-kube-api-access-g9slf\") pod \"dnsmasq-dns-7cb5889db5-5tjbs\" (UID: \"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d\") " pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" Dec 11 08:35:58 crc kubenswrapper[4881]: I1211 08:35:58.734363 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.397403 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.397492 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.475554 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.484956 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.488123 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-d9gkj" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.488161 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.491260 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.497077 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.555544 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.622877 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.622988 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1af483c2-ea3f-45cd-971d-797c06f5c6e2-lock\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.623100 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5kwg\" (UniqueName: \"kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-kube-api-access-j5kwg\") pod \"swift-storage-0\" (UID: 
\"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.623144 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.623204 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1af483c2-ea3f-45cd-971d-797c06f5c6e2-cache\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.725228 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.725311 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1af483c2-ea3f-45cd-971d-797c06f5c6e2-lock\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.725420 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5kwg\" (UniqueName: \"kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-kube-api-access-j5kwg\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.725459 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.725530 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1af483c2-ea3f-45cd-971d-797c06f5c6e2-cache\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.725729 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/swift-storage-0" Dec 11 08:35:59 crc kubenswrapper[4881]: E1211 08:35:59.725869 4881 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 11 08:35:59 crc kubenswrapper[4881]: E1211 08:35:59.725941 4881 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.725936 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: 
\"kubernetes.io/empty-dir/1af483c2-ea3f-45cd-971d-797c06f5c6e2-lock\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:35:59 crc kubenswrapper[4881]: E1211 08:35:59.725981 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift podName:1af483c2-ea3f-45cd-971d-797c06f5c6e2 nodeName:}" failed. No retries permitted until 2025-12-11 08:36:00.225966408 +0000 UTC m=+1208.603335105 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift") pod "swift-storage-0" (UID: "1af483c2-ea3f-45cd-971d-797c06f5c6e2") : configmap "swift-ring-files" not found Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.726086 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1af483c2-ea3f-45cd-971d-797c06f5c6e2-cache\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.757265 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5kwg\" (UniqueName: \"kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-kube-api-access-j5kwg\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.764860 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.977246 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.977669 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:35:59 crc kubenswrapper[4881]: I1211 08:35:59.982154 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.034003 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-qt8lj"] Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.035292 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.038496 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.038500 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.039621 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.049848 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-qt8lj"] Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.132931 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-swiftconf\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.133018 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-dispersionconf\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.133037 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76mk6\" (UniqueName: \"kubernetes.io/projected/d40e3cbd-c017-4b42-94ee-dea2565d55a3-kube-api-access-76mk6\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.133117 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d40e3cbd-c017-4b42-94ee-dea2565d55a3-scripts\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.133155 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d40e3cbd-c017-4b42-94ee-dea2565d55a3-ring-data-devices\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.133272 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d40e3cbd-c017-4b42-94ee-dea2565d55a3-etc-swift\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.133306 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-combined-ca-bundle\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 
08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.235088 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-dispersionconf\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.235141 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76mk6\" (UniqueName: \"kubernetes.io/projected/d40e3cbd-c017-4b42-94ee-dea2565d55a3-kube-api-access-76mk6\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.235175 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d40e3cbd-c017-4b42-94ee-dea2565d55a3-scripts\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.235209 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d40e3cbd-c017-4b42-94ee-dea2565d55a3-ring-data-devices\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.235287 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.235328 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d40e3cbd-c017-4b42-94ee-dea2565d55a3-etc-swift\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.235379 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-combined-ca-bundle\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.235441 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-swiftconf\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: E1211 08:36:00.235676 4881 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 11 08:36:00 crc kubenswrapper[4881]: E1211 08:36:00.235706 4881 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 11 08:36:00 crc kubenswrapper[4881]: E1211 08:36:00.235754 4881 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift podName:1af483c2-ea3f-45cd-971d-797c06f5c6e2 nodeName:}" failed. No retries permitted until 2025-12-11 08:36:01.235738074 +0000 UTC m=+1209.613106771 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift") pod "swift-storage-0" (UID: "1af483c2-ea3f-45cd-971d-797c06f5c6e2") : configmap "swift-ring-files" not found Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.236177 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d40e3cbd-c017-4b42-94ee-dea2565d55a3-scripts\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.236327 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d40e3cbd-c017-4b42-94ee-dea2565d55a3-ring-data-devices\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.236364 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d40e3cbd-c017-4b42-94ee-dea2565d55a3-etc-swift\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.238962 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-swiftconf\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.241407 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-combined-ca-bundle\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.243661 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-dispersionconf\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.256641 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76mk6\" (UniqueName: \"kubernetes.io/projected/d40e3cbd-c017-4b42-94ee-dea2565d55a3-kube-api-access-76mk6\") pod \"swift-ring-rebalance-qt8lj\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") " pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.357819 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.900024 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-5669cc64cd-8h7sh" Dec 11 08:36:00 crc kubenswrapper[4881]: I1211 08:36:00.964998 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-69ddbf8769-4wnph"] Dec 11 08:36:01 crc kubenswrapper[4881]: I1211 08:36:01.255294 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:36:01 crc kubenswrapper[4881]: E1211 08:36:01.255662 4881 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 11 08:36:01 crc kubenswrapper[4881]: E1211 08:36:01.255683 4881 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 11 08:36:01 crc kubenswrapper[4881]: E1211 08:36:01.255725 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift podName:1af483c2-ea3f-45cd-971d-797c06f5c6e2 nodeName:}" failed. No retries permitted until 2025-12-11 08:36:03.255711568 +0000 UTC m=+1211.633080265 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift") pod "swift-storage-0" (UID: "1af483c2-ea3f-45cd-971d-797c06f5c6e2") : configmap "swift-ring-files" not found Dec 11 08:36:03 crc kubenswrapper[4881]: I1211 08:36:03.298279 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:36:03 crc kubenswrapper[4881]: E1211 08:36:03.298526 4881 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 11 08:36:03 crc kubenswrapper[4881]: E1211 08:36:03.298691 4881 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 11 08:36:03 crc kubenswrapper[4881]: E1211 08:36:03.298758 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift podName:1af483c2-ea3f-45cd-971d-797c06f5c6e2 nodeName:}" failed. No retries permitted until 2025-12-11 08:36:07.298736692 +0000 UTC m=+1215.676105389 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift") pod "swift-storage-0" (UID: "1af483c2-ea3f-45cd-971d-797c06f5c6e2") : configmap "swift-ring-files" not found Dec 11 08:36:07 crc kubenswrapper[4881]: I1211 08:36:07.397005 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:36:07 crc kubenswrapper[4881]: E1211 08:36:07.397205 4881 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 11 08:36:07 crc kubenswrapper[4881]: E1211 08:36:07.397787 4881 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 11 08:36:07 crc kubenswrapper[4881]: E1211 08:36:07.397856 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift podName:1af483c2-ea3f-45cd-971d-797c06f5c6e2 nodeName:}" failed. No retries permitted until 2025-12-11 08:36:15.397835857 +0000 UTC m=+1223.775204554 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift") pod "swift-storage-0" (UID: "1af483c2-ea3f-45cd-971d-797c06f5c6e2") : configmap "swift-ring-files" not found Dec 11 08:36:10 crc kubenswrapper[4881]: I1211 08:36:10.182273 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 08:36:10 crc kubenswrapper[4881]: E1211 08:36:10.446614 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified" Dec 11 08:36:10 crc kubenswrapper[4881]: E1211 08:36:10.447125 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovn-controller,Image:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,Command:[ovn-controller --pidfile unix:/run/openvswitch/db.sock --certificate=/etc/pki/tls/certs/ovndb.crt --private-key=/etc/pki/tls/private/ovndb.key 
--ca-cert=/etc/pki/tls/certs/ovndbca.crt],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n649h586h55fh5d6h65bh587hf8hfh5f4h584h6dhddh5dbh5b4h57fh98hdch89h656h686h95h96h58h676h687hffh695h67dh9bh665h67ch5d4q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run-ovn,ReadOnly:false,MountPath:/var/run/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log-ovn,ReadOnly:false,MountPath:/var/log/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5gwsw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_liveness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_readiness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/share/ovn/scripts/ovn-ctl stop_controller],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-xfltd_openstack(49d6015f-9f76-4e77-821e-2a11887e497c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:36:10 crc kubenswrapper[4881]: E1211 08:36:10.448320 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"ovn-controller\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-xfltd" podUID="49d6015f-9f76-4e77-821e-2a11887e497c" Dec 11 08:36:10 crc kubenswrapper[4881]: E1211 08:36:10.652430 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified" Dec 11 08:36:10 crc kubenswrapper[4881]: E1211 08:36:10.652728 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovsdbserver-nb,Image:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,Command:[/usr/bin/dumb-init],Args:[/usr/local/bin/container-scripts/setup.sh],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nbch9bh598h56ch58bh5fdh78h567h649h664h5h65dh5dh549h64bh564hdchcbh64hb5h5b4h55bhfdh55bhcch654h5f6h64h79h5f6h689h644q,ValueFrom:nil,},EnvVar{Name:OVN_LOGDIR,Value:/tmp,ValueFrom:nil,},EnvVar{Name:OVN_RUNDIR,Value:/tmp,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovndbcluster-nb-etc-ovn,ReadOnly:false,MountPath:/etc/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6l5f6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof 
ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/cleanup.sh],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:20,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovsdbserver-nb-0_openstack(7f0aa090-3aac-4da8-9efa-a31a7b3b130f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:36:10 crc kubenswrapper[4881]: I1211 08:36:10.737929 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-vkxg2" Dec 11 08:36:10 crc kubenswrapper[4881]: I1211 08:36:10.861070 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9zr6\" (UniqueName: \"kubernetes.io/projected/ee76bb27-a55c-48e8-a943-712c8f0036f4-kube-api-access-t9zr6\") pod \"ee76bb27-a55c-48e8-a943-712c8f0036f4\" (UID: \"ee76bb27-a55c-48e8-a943-712c8f0036f4\") " Dec 11 08:36:10 crc kubenswrapper[4881]: I1211 08:36:10.861222 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee76bb27-a55c-48e8-a943-712c8f0036f4-config\") pod \"ee76bb27-a55c-48e8-a943-712c8f0036f4\" (UID: \"ee76bb27-a55c-48e8-a943-712c8f0036f4\") " Dec 11 08:36:10 crc kubenswrapper[4881]: I1211 08:36:10.861243 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee76bb27-a55c-48e8-a943-712c8f0036f4-dns-svc\") pod \"ee76bb27-a55c-48e8-a943-712c8f0036f4\" (UID: \"ee76bb27-a55c-48e8-a943-712c8f0036f4\") " Dec 11 08:36:10 crc kubenswrapper[4881]: I1211 08:36:10.861939 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee76bb27-a55c-48e8-a943-712c8f0036f4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ee76bb27-a55c-48e8-a943-712c8f0036f4" (UID: "ee76bb27-a55c-48e8-a943-712c8f0036f4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:36:10 crc kubenswrapper[4881]: I1211 08:36:10.861961 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee76bb27-a55c-48e8-a943-712c8f0036f4-config" (OuterVolumeSpecName: "config") pod "ee76bb27-a55c-48e8-a943-712c8f0036f4" (UID: "ee76bb27-a55c-48e8-a943-712c8f0036f4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:36:10 crc kubenswrapper[4881]: I1211 08:36:10.862615 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee76bb27-a55c-48e8-a943-712c8f0036f4-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:36:10 crc kubenswrapper[4881]: I1211 08:36:10.862630 4881 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee76bb27-a55c-48e8-a943-712c8f0036f4-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 08:36:10 crc kubenswrapper[4881]: I1211 08:36:10.865054 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee76bb27-a55c-48e8-a943-712c8f0036f4-kube-api-access-t9zr6" (OuterVolumeSpecName: "kube-api-access-t9zr6") pod "ee76bb27-a55c-48e8-a943-712c8f0036f4" (UID: "ee76bb27-a55c-48e8-a943-712c8f0036f4"). InnerVolumeSpecName "kube-api-access-t9zr6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:36:10 crc kubenswrapper[4881]: I1211 08:36:10.965603 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9zr6\" (UniqueName: \"kubernetes.io/projected/ee76bb27-a55c-48e8-a943-712c8f0036f4-kube-api-access-t9zr6\") on node \"crc\" DevicePath \"\"" Dec 11 08:36:10 crc kubenswrapper[4881]: I1211 08:36:10.997378 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-vkxg2" Dec 11 08:36:10 crc kubenswrapper[4881]: I1211 08:36:10.997591 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-vkxg2" event={"ID":"ee76bb27-a55c-48e8-a943-712c8f0036f4","Type":"ContainerDied","Data":"08c3f264a3d428de49ff45df482764969a7a0852727698b0204cb3153fbed710"} Dec 11 08:36:10 crc kubenswrapper[4881]: E1211 08:36:10.999292 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified\\\"\"" pod="openstack/ovn-controller-xfltd" podUID="49d6015f-9f76-4e77-821e-2a11887e497c" Dec 11 08:36:11 crc kubenswrapper[4881]: I1211 08:36:11.095839 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-vkxg2"] Dec 11 08:36:11 crc kubenswrapper[4881]: I1211 08:36:11.106515 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-vkxg2"] Dec 11 08:36:11 crc kubenswrapper[4881]: I1211 08:36:11.233586 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-5tjbs"] Dec 11 08:36:11 crc kubenswrapper[4881]: I1211 08:36:11.516471 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-qt8lj"] Dec 11 08:36:11 crc kubenswrapper[4881]: E1211 08:36:11.585999 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Dec 11 08:36:11 crc kubenswrapper[4881]: E1211 08:36:11.586242 4881 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Dec 11 08:36:11 crc kubenswrapper[4881]: E1211 08:36:11.586406 4881 kuberuntime_manager.go:1274] 
"Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-h8f96,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(558f097f-277d-4824-bafc-28c4c0f139f3): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 11 08:36:11 crc kubenswrapper[4881]: E1211 08:36:11.590371 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="558f097f-277d-4824-bafc-28c4c0f139f3" Dec 11 08:36:11 crc kubenswrapper[4881]: W1211 08:36:11.609702 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd40e3cbd_c017_4b42_94ee_dea2565d55a3.slice/crio-7387deb0c9b28eaaa6c529f683a2d563ef404ab7ae77646c6e6bb0c763c157b9 WatchSource:0}: Error finding container 7387deb0c9b28eaaa6c529f683a2d563ef404ab7ae77646c6e6bb0c763c157b9: Status 404 returned error can't find the container with id 7387deb0c9b28eaaa6c529f683a2d563ef404ab7ae77646c6e6bb0c763c157b9 Dec 11 08:36:11 crc kubenswrapper[4881]: W1211 08:36:11.611360 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode1cbf25b_5855_4bdd_8b9a_af1d48ef038d.slice/crio-a6ed656b2f45f5ea056e7f47470fcdeac7bee96b766ad8af245d132d25f0b122 WatchSource:0}: Error finding container 
a6ed656b2f45f5ea056e7f47470fcdeac7bee96b766ad8af245d132d25f0b122: Status 404 returned error can't find the container with id a6ed656b2f45f5ea056e7f47470fcdeac7bee96b766ad8af245d132d25f0b122 Dec 11 08:36:12 crc kubenswrapper[4881]: I1211 08:36:12.007358 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" event={"ID":"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d","Type":"ContainerStarted","Data":"a6ed656b2f45f5ea056e7f47470fcdeac7bee96b766ad8af245d132d25f0b122"} Dec 11 08:36:12 crc kubenswrapper[4881]: I1211 08:36:12.009626 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-qt8lj" event={"ID":"d40e3cbd-c017-4b42-94ee-dea2565d55a3","Type":"ContainerStarted","Data":"7387deb0c9b28eaaa6c529f683a2d563ef404ab7ae77646c6e6bb0c763c157b9"} Dec 11 08:36:12 crc kubenswrapper[4881]: E1211 08:36:12.013133 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="558f097f-277d-4824-bafc-28c4c0f139f3" Dec 11 08:36:13 crc kubenswrapper[4881]: I1211 08:36:13.050997 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee76bb27-a55c-48e8-a943-712c8f0036f4" path="/var/lib/kubelet/pods/ee76bb27-a55c-48e8-a943-712c8f0036f4/volumes" Dec 11 08:36:13 crc kubenswrapper[4881]: I1211 08:36:13.062763 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-5gqh8" event={"ID":"ede1ec9d-4207-4a9c-ba57-3f2037f68632","Type":"ContainerStarted","Data":"fb8090d41cfb4f56b7c49ec4fb0f80a996ec1998c8a8042b374c2247fcf787d2"} Dec 11 08:36:13 crc kubenswrapper[4881]: I1211 08:36:13.084834 4881 generic.go:334] "Generic (PLEG): container finished" podID="5821aceb-9d65-4e6f-940d-2a20117397f6" containerID="d2e7f857235b2bcb1ad66c87925fed80c9ecd182c5a79c243a4af95cd5333196" exitCode=0 Dec 11 08:36:13 crc kubenswrapper[4881]: I1211 08:36:13.084950 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-5485d" event={"ID":"5821aceb-9d65-4e6f-940d-2a20117397f6","Type":"ContainerDied","Data":"d2e7f857235b2bcb1ad66c87925fed80c9ecd182c5a79c243a4af95cd5333196"} Dec 11 08:36:13 crc kubenswrapper[4881]: I1211 08:36:13.113681 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-qvsbb" event={"ID":"3c5fa886-8b43-4ef2-9f4b-d4724c4efa56","Type":"ContainerStarted","Data":"df21d8256e01b90f7167bf70c3591b0281319b9daef9d988bd5c0293c02f1c81"} Dec 11 08:36:13 crc kubenswrapper[4881]: I1211 08:36:13.135866 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"70402eec-968d-4ceb-b259-5e2508ee21a0","Type":"ContainerStarted","Data":"12960b46b3efad366ecc5ad6901af182ebd3ad8be893caabdb06d963e05b79af"} Dec 11 08:36:13 crc kubenswrapper[4881]: I1211 08:36:13.172223 4881 generic.go:334] "Generic (PLEG): container finished" podID="e1cbf25b-5855-4bdd-8b9a-af1d48ef038d" containerID="ee58a97bf15ede8164bc20dc2102b2cd3854ce0929b2c2db6b2892101e2c841b" exitCode=0 Dec 11 08:36:13 crc kubenswrapper[4881]: I1211 08:36:13.172299 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" event={"ID":"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d","Type":"ContainerDied","Data":"ee58a97bf15ede8164bc20dc2102b2cd3854ce0929b2c2db6b2892101e2c841b"} Dec 
11 08:36:13 crc kubenswrapper[4881]: I1211 08:36:13.182740 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"b9b67c1c-0e11-4c19-8d1f-6c046375659c","Type":"ContainerStarted","Data":"e41f1662a8b3565d062ff31205a30b93b5df1220f483734b24508ca9b503c701"} Dec 11 08:36:13 crc kubenswrapper[4881]: I1211 08:36:13.194075 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7a825abb-23ec-4f51-940d-2500da233e14","Type":"ContainerStarted","Data":"993432871bb7f46e4abcca704ad878aef76d0a19c019cf0f6e8bbccb4b833128"} Dec 11 08:36:13 crc kubenswrapper[4881]: I1211 08:36:13.249248 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-qvsbb" podStartSLOduration=36.533232767 podStartE2EDuration="45.249229424s" podCreationTimestamp="2025-12-11 08:35:28 +0000 UTC" firstStartedPulling="2025-12-11 08:35:50.355813879 +0000 UTC m=+1198.733182566" lastFinishedPulling="2025-12-11 08:35:59.071810526 +0000 UTC m=+1207.449179223" observedRunningTime="2025-12-11 08:36:13.218658788 +0000 UTC m=+1221.596027485" watchObservedRunningTime="2025-12-11 08:36:13.249229424 +0000 UTC m=+1221.626598121" Dec 11 08:36:13 crc kubenswrapper[4881]: I1211 08:36:13.357901 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=25.988547482 podStartE2EDuration="49.357878634s" podCreationTimestamp="2025-12-11 08:35:24 +0000 UTC" firstStartedPulling="2025-12-11 08:35:26.729701827 +0000 UTC m=+1175.107070534" lastFinishedPulling="2025-12-11 08:35:50.099032989 +0000 UTC m=+1198.476401686" observedRunningTime="2025-12-11 08:36:13.323792131 +0000 UTC m=+1221.701160858" watchObservedRunningTime="2025-12-11 08:36:13.357878634 +0000 UTC m=+1221.735247331" Dec 11 08:36:14 crc kubenswrapper[4881]: I1211 08:36:14.208045 4881 generic.go:334] "Generic (PLEG): container finished" podID="ede1ec9d-4207-4a9c-ba57-3f2037f68632" containerID="fb8090d41cfb4f56b7c49ec4fb0f80a996ec1998c8a8042b374c2247fcf787d2" exitCode=0 Dec 11 08:36:14 crc kubenswrapper[4881]: I1211 08:36:14.208234 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-5gqh8" event={"ID":"ede1ec9d-4207-4a9c-ba57-3f2037f68632","Type":"ContainerDied","Data":"fb8090d41cfb4f56b7c49ec4fb0f80a996ec1998c8a8042b374c2247fcf787d2"} Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.224613 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1206112a-8438-4dcf-9cad-a3e38790a344","Type":"ContainerStarted","Data":"aacb4d3d3e7abb2a614eb9a8d21b36b2a3df4e493512d61d72fa8e2f8536328b"} Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.264595 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-rmmgk"] Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.266627 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.268476 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.360105 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-rmmgk"] Dec 11 08:36:15 crc kubenswrapper[4881]: E1211 08:36:15.435450 4881 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 11 08:36:15 crc kubenswrapper[4881]: E1211 08:36:15.435480 4881 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 11 08:36:15 crc kubenswrapper[4881]: E1211 08:36:15.435532 4881 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift podName:1af483c2-ea3f-45cd-971d-797c06f5c6e2 nodeName:}" failed. No retries permitted until 2025-12-11 08:36:31.435515655 +0000 UTC m=+1239.812884352 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift") pod "swift-storage-0" (UID: "1af483c2-ea3f-45cd-971d-797c06f5c6e2") : configmap "swift-ring-files" not found Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.435321 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.436065 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cec096de-5459-4769-9e87-9a3f54d3e8dc-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.436107 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cec096de-5459-4769-9e87-9a3f54d3e8dc-config\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.436163 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/cec096de-5459-4769-9e87-9a3f54d3e8dc-ovn-rundir\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.436182 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cec096de-5459-4769-9e87-9a3f54d3e8dc-combined-ca-bundle\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.436282 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-rfchs\" (UniqueName: \"kubernetes.io/projected/cec096de-5459-4769-9e87-9a3f54d3e8dc-kube-api-access-rfchs\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.436368 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/cec096de-5459-4769-9e87-9a3f54d3e8dc-ovs-rundir\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.453873 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5485d"] Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.483781 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-74f6f696b9-kfh6q"] Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.485741 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.489835 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.520399 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74f6f696b9-kfh6q"] Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.540494 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfchs\" (UniqueName: \"kubernetes.io/projected/cec096de-5459-4769-9e87-9a3f54d3e8dc-kube-api-access-rfchs\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.540583 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/cec096de-5459-4769-9e87-9a3f54d3e8dc-ovs-rundir\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.540701 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cec096de-5459-4769-9e87-9a3f54d3e8dc-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.540744 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cec096de-5459-4769-9e87-9a3f54d3e8dc-config\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.540774 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/cec096de-5459-4769-9e87-9a3f54d3e8dc-ovn-rundir\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.540798 4881 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cec096de-5459-4769-9e87-9a3f54d3e8dc-combined-ca-bundle\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.542549 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/cec096de-5459-4769-9e87-9a3f54d3e8dc-ovs-rundir\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.542857 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/cec096de-5459-4769-9e87-9a3f54d3e8dc-ovn-rundir\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.542950 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cec096de-5459-4769-9e87-9a3f54d3e8dc-config\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.547050 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cec096de-5459-4769-9e87-9a3f54d3e8dc-combined-ca-bundle\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.548749 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cec096de-5459-4769-9e87-9a3f54d3e8dc-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.580741 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfchs\" (UniqueName: \"kubernetes.io/projected/cec096de-5459-4769-9e87-9a3f54d3e8dc-kube-api-access-rfchs\") pod \"ovn-controller-metrics-rmmgk\" (UID: \"cec096de-5459-4769-9e87-9a3f54d3e8dc\") " pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.628007 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-5tjbs"] Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.658900 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-dns-svc\") pod \"dnsmasq-dns-74f6f696b9-kfh6q\" (UID: \"0586421b-6bd9-4994-a95d-fbe21a506e46\") " pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.658974 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wx84k\" (UniqueName: \"kubernetes.io/projected/0586421b-6bd9-4994-a95d-fbe21a506e46-kube-api-access-wx84k\") pod \"dnsmasq-dns-74f6f696b9-kfh6q\" (UID: \"0586421b-6bd9-4994-a95d-fbe21a506e46\") " 
pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.659052 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6f696b9-kfh6q\" (UID: \"0586421b-6bd9-4994-a95d-fbe21a506e46\") " pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.659066 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-config\") pod \"dnsmasq-dns-74f6f696b9-kfh6q\" (UID: \"0586421b-6bd9-4994-a95d-fbe21a506e46\") " pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.669437 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-mrlfq"] Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.677778 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-mrlfq" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.680236 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.686621 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-rmmgk" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.691349 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-mrlfq"] Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.760588 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-mrlfq\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " pod="openstack/dnsmasq-dns-698758b865-mrlfq" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.760639 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-dns-svc\") pod \"dnsmasq-dns-698758b865-mrlfq\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " pod="openstack/dnsmasq-dns-698758b865-mrlfq" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.760685 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-dns-svc\") pod \"dnsmasq-dns-74f6f696b9-kfh6q\" (UID: \"0586421b-6bd9-4994-a95d-fbe21a506e46\") " pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.760721 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wx84k\" (UniqueName: \"kubernetes.io/projected/0586421b-6bd9-4994-a95d-fbe21a506e46-kube-api-access-wx84k\") pod \"dnsmasq-dns-74f6f696b9-kfh6q\" (UID: \"0586421b-6bd9-4994-a95d-fbe21a506e46\") " pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.760779 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-ovsdbserver-nb\") pod 
\"dnsmasq-dns-74f6f696b9-kfh6q\" (UID: \"0586421b-6bd9-4994-a95d-fbe21a506e46\") " pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.760796 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-config\") pod \"dnsmasq-dns-74f6f696b9-kfh6q\" (UID: \"0586421b-6bd9-4994-a95d-fbe21a506e46\") " pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.760818 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-mrlfq\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " pod="openstack/dnsmasq-dns-698758b865-mrlfq" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.760837 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-config\") pod \"dnsmasq-dns-698758b865-mrlfq\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " pod="openstack/dnsmasq-dns-698758b865-mrlfq" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.760880 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vj6vb\" (UniqueName: \"kubernetes.io/projected/affe9e76-f06d-45d9-93f1-7f00db52c82d-kube-api-access-vj6vb\") pod \"dnsmasq-dns-698758b865-mrlfq\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " pod="openstack/dnsmasq-dns-698758b865-mrlfq" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.761629 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-dns-svc\") pod \"dnsmasq-dns-74f6f696b9-kfh6q\" (UID: \"0586421b-6bd9-4994-a95d-fbe21a506e46\") " pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.762377 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6f696b9-kfh6q\" (UID: \"0586421b-6bd9-4994-a95d-fbe21a506e46\") " pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.762908 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-config\") pod \"dnsmasq-dns-74f6f696b9-kfh6q\" (UID: \"0586421b-6bd9-4994-a95d-fbe21a506e46\") " pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.790167 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wx84k\" (UniqueName: \"kubernetes.io/projected/0586421b-6bd9-4994-a95d-fbe21a506e46-kube-api-access-wx84k\") pod \"dnsmasq-dns-74f6f696b9-kfh6q\" (UID: \"0586421b-6bd9-4994-a95d-fbe21a506e46\") " pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.795105 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.796086 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/openstack-cell1-galera-0" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.815919 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.866292 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-mrlfq\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " pod="openstack/dnsmasq-dns-698758b865-mrlfq" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.866346 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-config\") pod \"dnsmasq-dns-698758b865-mrlfq\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " pod="openstack/dnsmasq-dns-698758b865-mrlfq" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.866422 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vj6vb\" (UniqueName: \"kubernetes.io/projected/affe9e76-f06d-45d9-93f1-7f00db52c82d-kube-api-access-vj6vb\") pod \"dnsmasq-dns-698758b865-mrlfq\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " pod="openstack/dnsmasq-dns-698758b865-mrlfq" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.866498 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-mrlfq\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " pod="openstack/dnsmasq-dns-698758b865-mrlfq" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.866523 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-dns-svc\") pod \"dnsmasq-dns-698758b865-mrlfq\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " pod="openstack/dnsmasq-dns-698758b865-mrlfq" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.867551 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-dns-svc\") pod \"dnsmasq-dns-698758b865-mrlfq\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " pod="openstack/dnsmasq-dns-698758b865-mrlfq" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.868372 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-mrlfq\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " pod="openstack/dnsmasq-dns-698758b865-mrlfq" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.876888 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-mrlfq\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " pod="openstack/dnsmasq-dns-698758b865-mrlfq" Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.877639 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-config\") pod \"dnsmasq-dns-698758b865-mrlfq\" 
(UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " pod="openstack/dnsmasq-dns-698758b865-mrlfq"
Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.885102 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vj6vb\" (UniqueName: \"kubernetes.io/projected/affe9e76-f06d-45d9-93f1-7f00db52c82d-kube-api-access-vj6vb\") pod \"dnsmasq-dns-698758b865-mrlfq\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " pod="openstack/dnsmasq-dns-698758b865-mrlfq"
Dec 11 08:36:15 crc kubenswrapper[4881]: I1211 08:36:15.997131 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-mrlfq"
Dec 11 08:36:16 crc kubenswrapper[4881]: I1211 08:36:16.252352 4881 generic.go:334] "Generic (PLEG): container finished" podID="7a825abb-23ec-4f51-940d-2500da233e14" containerID="993432871bb7f46e4abcca704ad878aef76d0a19c019cf0f6e8bbccb4b833128" exitCode=0
Dec 11 08:36:16 crc kubenswrapper[4881]: I1211 08:36:16.252399 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7a825abb-23ec-4f51-940d-2500da233e14","Type":"ContainerDied","Data":"993432871bb7f46e4abcca704ad878aef76d0a19c019cf0f6e8bbccb4b833128"}
Dec 11 08:36:18 crc kubenswrapper[4881]: I1211 08:36:18.451399 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74f6f696b9-kfh6q"]
Dec 11 08:36:18 crc kubenswrapper[4881]: I1211 08:36:18.482988 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-rmmgk"]
Dec 11 08:36:18 crc kubenswrapper[4881]: I1211 08:36:18.498692 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-mrlfq"]
Dec 11 08:36:18 crc kubenswrapper[4881]: E1211 08:36:18.780735 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-nb-0" podUID="7f0aa090-3aac-4da8-9efa-a31a7b3b130f"
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.321890 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"b9b67c1c-0e11-4c19-8d1f-6c046375659c","Type":"ContainerStarted","Data":"5ffc67ee4914a581b5d08a84260d1a261e5c3aa1336449b18a1e53c5b8d4a5c4"}
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.325083 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7a825abb-23ec-4f51-940d-2500da233e14","Type":"ContainerStarted","Data":"a86d6c3642d4303480e021bebc7ea4e905f1215b34e8ffae8f9d2440975f8215"}
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.327629 4881 generic.go:334] "Generic (PLEG): container finished" podID="affe9e76-f06d-45d9-93f1-7f00db52c82d" containerID="4aa7ebcf4c4847b75bbf85fad95bf72a0d1ec3c082947285051dedd0d1e05fef" exitCode=0
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.327701 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-mrlfq" event={"ID":"affe9e76-f06d-45d9-93f1-7f00db52c82d","Type":"ContainerDied","Data":"4aa7ebcf4c4847b75bbf85fad95bf72a0d1ec3c082947285051dedd0d1e05fef"}
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.327719 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-mrlfq" event={"ID":"affe9e76-f06d-45d9-93f1-7f00db52c82d","Type":"ContainerStarted","Data":"57dd363b3b978ebe185b042faa69d0e446a8668c24289933bf5c8c69bd1efd93"}
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.329833 4881 generic.go:334] "Generic (PLEG): container finished" podID="0586421b-6bd9-4994-a95d-fbe21a506e46" containerID="44ca4b39d2c447fa4b2810ef15cd2637326ea68559aaa20cdf2bac5ee4290fd6" exitCode=0
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.329899 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" event={"ID":"0586421b-6bd9-4994-a95d-fbe21a506e46","Type":"ContainerDied","Data":"44ca4b39d2c447fa4b2810ef15cd2637326ea68559aaa20cdf2bac5ee4290fd6"}
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.330219 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" event={"ID":"0586421b-6bd9-4994-a95d-fbe21a506e46","Type":"ContainerStarted","Data":"00dff6da7f0a4fb9ad98a946fd4333aeb7ade903fd88fbc2338d4a05b3acff63"}
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.332635 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-rmmgk" event={"ID":"cec096de-5459-4769-9e87-9a3f54d3e8dc","Type":"ContainerStarted","Data":"0c4f282d4d986aeea4eedf92f09d9ebee6df1738283fe1b14c6e32ac22cfa2bd"}
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.332692 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-rmmgk" event={"ID":"cec096de-5459-4769-9e87-9a3f54d3e8dc","Type":"ContainerStarted","Data":"67ff68d8831619acec409823ea19d439d3ff2a7ca564cf6a5414a2ebb782445a"}
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.335040 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" event={"ID":"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d","Type":"ContainerStarted","Data":"afba34d0541f3a617bbd22315ff9717b831464d8a4ba0f8b8017969828cf95d7"}
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.335177 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" podUID="e1cbf25b-5855-4bdd-8b9a-af1d48ef038d" containerName="dnsmasq-dns" containerID="cri-o://afba34d0541f3a617bbd22315ff9717b831464d8a4ba0f8b8017969828cf95d7" gracePeriod=10
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.335215 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs"
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.341474 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-5gqh8" event={"ID":"ede1ec9d-4207-4a9c-ba57-3f2037f68632","Type":"ContainerStarted","Data":"691c80b8ba18b08ae907f532883748653160dbd02aee177fcc8ca8f593d11f63"}
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.341526 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-5gqh8" event={"ID":"ede1ec9d-4207-4a9c-ba57-3f2037f68632","Type":"ContainerStarted","Data":"55dbecdbe90f10bb6cea9946db64037f463164abcf0f5474361bc716f65a1506"}
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.342209 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-5gqh8"
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.342355 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-5gqh8"
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.345736 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-5485d" event={"ID":"5821aceb-9d65-4e6f-940d-2a20117397f6","Type":"ContainerStarted","Data":"300c9cc21d7c99e88f619059b20812e4d22519f3a2fa93be568aaa8a35c666db"}
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.345892 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-5485d" podUID="5821aceb-9d65-4e6f-940d-2a20117397f6" containerName="dnsmasq-dns" containerID="cri-o://300c9cc21d7c99e88f619059b20812e4d22519f3a2fa93be568aaa8a35c666db" gracePeriod=10
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.346006 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-5485d"
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.367374 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"7f0aa090-3aac-4da8-9efa-a31a7b3b130f","Type":"ContainerStarted","Data":"d6d239b13f70dd8d93674b1681a7e881dfd8741836655e89172b131c54bb83e6"}
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.371510 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=19.563827795 podStartE2EDuration="46.371494834s" podCreationTimestamp="2025-12-11 08:35:33 +0000 UTC" firstStartedPulling="2025-12-11 08:35:51.100240582 +0000 UTC m=+1199.477609279" lastFinishedPulling="2025-12-11 08:36:17.907907621 +0000 UTC m=+1226.285276318" observedRunningTime="2025-12-11 08:36:19.357880874 +0000 UTC m=+1227.735249641" watchObservedRunningTime="2025-12-11 08:36:19.371494834 +0000 UTC m=+1227.748863541"
Dec 11 08:36:19 crc kubenswrapper[4881]: E1211 08:36:19.373810 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="7f0aa090-3aac-4da8-9efa-a31a7b3b130f"
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.378109 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-qt8lj" event={"ID":"d40e3cbd-c017-4b42-94ee-dea2565d55a3","Type":"ContainerStarted","Data":"5e44991ce8c87019f1fa3dc8bcbae2bf02e6db132cee4406f0ce0c273f1f447e"}
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.530591 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-5485d" podStartSLOduration=9.069435045 podStartE2EDuration="58.530570788s" podCreationTimestamp="2025-12-11 08:35:21 +0000 UTC" firstStartedPulling="2025-12-11 08:35:22.246830392 +0000 UTC m=+1170.624199089" lastFinishedPulling="2025-12-11 08:36:11.707966135 +0000 UTC m=+1220.085334832" observedRunningTime="2025-12-11 08:36:19.466867053 +0000 UTC m=+1227.844235770" watchObservedRunningTime="2025-12-11 08:36:19.530570788 +0000 UTC m=+1227.907939485"
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.546547 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-rmmgk" podStartSLOduration=4.546527687 podStartE2EDuration="4.546527687s" podCreationTimestamp="2025-12-11 08:36:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:36:19.497786227 +0000 UTC m=+1227.875154924" watchObservedRunningTime="2025-12-11 08:36:19.546527687 +0000 UTC m=+1227.923896384"
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.552244 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=-9223371980.302551 podStartE2EDuration="56.552225061s" podCreationTimestamp="2025-12-11 08:35:23 +0000 UTC" firstStartedPulling="2025-12-11 08:35:25.468631066 +0000 UTC m=+1173.845999763" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:36:19.530112257 +0000 UTC m=+1227.907480964" watchObservedRunningTime="2025-12-11 08:36:19.552225061 +0000 UTC m=+1227.929593758"
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.565896 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" podStartSLOduration=20.737804154 podStartE2EDuration="21.565874742s" podCreationTimestamp="2025-12-11 08:35:58 +0000 UTC" firstStartedPulling="2025-12-11 08:36:11.618452204 +0000 UTC m=+1219.995820911" lastFinishedPulling="2025-12-11 08:36:12.446522802 +0000 UTC m=+1220.823891499" observedRunningTime="2025-12-11 08:36:19.555461962 +0000 UTC m=+1227.932830669" watchObservedRunningTime="2025-12-11 08:36:19.565874742 +0000 UTC m=+1227.943243439"
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.592641 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-5gqh8" podStartSLOduration=28.481847108 podStartE2EDuration="47.592618352s" podCreationTimestamp="2025-12-11 08:35:32 +0000 UTC" firstStartedPulling="2025-12-11 08:35:51.677918429 +0000 UTC m=+1200.055287126" lastFinishedPulling="2025-12-11 08:36:10.788689673 +0000 UTC m=+1219.166058370" observedRunningTime="2025-12-11 08:36:19.586734604 +0000 UTC m=+1227.964103311" watchObservedRunningTime="2025-12-11 08:36:19.592618352 +0000 UTC m=+1227.969987049"
Dec 11 08:36:19 crc kubenswrapper[4881]: I1211 08:36:19.620299 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-qt8lj" podStartSLOduration=13.375424754 podStartE2EDuration="19.620275364s" podCreationTimestamp="2025-12-11 08:36:00 +0000 UTC" firstStartedPulling="2025-12-11 08:36:11.618325171 +0000 UTC m=+1219.995693868" lastFinishedPulling="2025-12-11 08:36:17.863175781 +0000 UTC m=+1226.240544478" observedRunningTime="2025-12-11 08:36:19.609915465 +0000 UTC m=+1227.987284162" watchObservedRunningTime="2025-12-11 08:36:19.620275364 +0000 UTC m=+1227.997644051"
Dec 11 08:36:20 crc kubenswrapper[4881]: I1211 08:36:20.017882 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Dec 11 08:36:20 crc kubenswrapper[4881]: I1211 08:36:20.094720 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Dec 11 08:36:20 crc kubenswrapper[4881]: I1211 08:36:20.152605 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0"
Dec 11 08:36:20 crc kubenswrapper[4881]: I1211 08:36:20.152657 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0"
Dec 11 08:36:20 crc kubenswrapper[4881]: I1211 08:36:20.208714 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0"
Dec 11 08:36:20 crc kubenswrapper[4881]: I1211 08:36:20.387428 4881 generic.go:334] "Generic (PLEG): container finished" podID="e1cbf25b-5855-4bdd-8b9a-af1d48ef038d" containerID="afba34d0541f3a617bbd22315ff9717b831464d8a4ba0f8b8017969828cf95d7" exitCode=0
Dec 11 08:36:20 crc kubenswrapper[4881]: I1211 08:36:20.387498 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" event={"ID":"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d","Type":"ContainerDied","Data":"afba34d0541f3a617bbd22315ff9717b831464d8a4ba0f8b8017969828cf95d7"}
Dec 11 08:36:20 crc kubenswrapper[4881]: I1211 08:36:20.389570 4881 generic.go:334] "Generic (PLEG): container finished" podID="5821aceb-9d65-4e6f-940d-2a20117397f6" containerID="300c9cc21d7c99e88f619059b20812e4d22519f3a2fa93be568aaa8a35c666db" exitCode=0
Dec 11 08:36:20 crc kubenswrapper[4881]: I1211 08:36:20.389727 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-5485d" event={"ID":"5821aceb-9d65-4e6f-940d-2a20117397f6","Type":"ContainerDied","Data":"300c9cc21d7c99e88f619059b20812e4d22519f3a2fa93be568aaa8a35c666db"}
Dec 11 08:36:20 crc kubenswrapper[4881]: E1211 08:36:20.394780 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="7f0aa090-3aac-4da8-9efa-a31a7b3b130f"
Dec 11 08:36:20 crc kubenswrapper[4881]: I1211 08:36:20.435351 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0"
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.409509 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-5485d" event={"ID":"5821aceb-9d65-4e6f-940d-2a20117397f6","Type":"ContainerDied","Data":"deaa8d121fc24b0dee390ec6f33b93af21996dd0190bc540f1a4a92964bfdb1a"}
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.409838 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="deaa8d121fc24b0dee390ec6f33b93af21996dd0190bc540f1a4a92964bfdb1a"
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.411629 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-mrlfq" event={"ID":"affe9e76-f06d-45d9-93f1-7f00db52c82d","Type":"ContainerStarted","Data":"bb06310dc7df1e3b19e56273aa9d843f75d5abd6c1956b6aebf39d0899fa3217"}
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.411785 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-mrlfq"
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.415175 4881 generic.go:334] "Generic (PLEG): container finished" podID="1206112a-8438-4dcf-9cad-a3e38790a344" containerID="aacb4d3d3e7abb2a614eb9a8d21b36b2a3df4e493512d61d72fa8e2f8536328b" exitCode=0
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.415271 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1206112a-8438-4dcf-9cad-a3e38790a344","Type":"ContainerDied","Data":"aacb4d3d3e7abb2a614eb9a8d21b36b2a3df4e493512d61d72fa8e2f8536328b"}
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.417786 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" event={"ID":"0586421b-6bd9-4994-a95d-fbe21a506e46","Type":"ContainerStarted","Data":"0cd756bbcccd27b2b38b0b92093d46082c45e1cd37e7e18f1b573fbae37584bc"}
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.418546 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q"
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.422486 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs" event={"ID":"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d","Type":"ContainerDied","Data":"a6ed656b2f45f5ea056e7f47470fcdeac7bee96b766ad8af245d132d25f0b122"}
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.422527 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a6ed656b2f45f5ea056e7f47470fcdeac7bee96b766ad8af245d132d25f0b122"
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.454980 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-5485d"
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.466851 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs"
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.473096 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-mrlfq" podStartSLOduration=7.473073918 podStartE2EDuration="7.473073918s" podCreationTimestamp="2025-12-11 08:36:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:36:22.437194479 +0000 UTC m=+1230.814563186" watchObservedRunningTime="2025-12-11 08:36:22.473073918 +0000 UTC m=+1230.850442615"
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.493476 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" podStartSLOduration=7.493456668 podStartE2EDuration="7.493456668s" podCreationTimestamp="2025-12-11 08:36:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:36:22.485117159 +0000 UTC m=+1230.862485856" watchObservedRunningTime="2025-12-11 08:36:22.493456668 +0000 UTC m=+1230.870825365"
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.541246 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8sl4\" (UniqueName: \"kubernetes.io/projected/5821aceb-9d65-4e6f-940d-2a20117397f6-kube-api-access-g8sl4\") pod \"5821aceb-9d65-4e6f-940d-2a20117397f6\" (UID: \"5821aceb-9d65-4e6f-940d-2a20117397f6\") "
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.541392 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-dns-svc\") pod \"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d\" (UID: \"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d\") "
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.541452 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9slf\" (UniqueName: \"kubernetes.io/projected/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-kube-api-access-g9slf\") pod \"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d\" (UID: \"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d\") "
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.541535 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5821aceb-9d65-4e6f-940d-2a20117397f6-config\") pod \"5821aceb-9d65-4e6f-940d-2a20117397f6\" (UID: \"5821aceb-9d65-4e6f-940d-2a20117397f6\") "
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.541630 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5821aceb-9d65-4e6f-940d-2a20117397f6-dns-svc\") pod \"5821aceb-9d65-4e6f-940d-2a20117397f6\" (UID: \"5821aceb-9d65-4e6f-940d-2a20117397f6\") "
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.541712 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-config\") pod \"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d\" (UID: \"e1cbf25b-5855-4bdd-8b9a-af1d48ef038d\") "
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.556498 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5821aceb-9d65-4e6f-940d-2a20117397f6-kube-api-access-g8sl4" (OuterVolumeSpecName: "kube-api-access-g8sl4") pod "5821aceb-9d65-4e6f-940d-2a20117397f6" (UID: "5821aceb-9d65-4e6f-940d-2a20117397f6"). InnerVolumeSpecName "kube-api-access-g8sl4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.568389 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-kube-api-access-g9slf" (OuterVolumeSpecName: "kube-api-access-g9slf") pod "e1cbf25b-5855-4bdd-8b9a-af1d48ef038d" (UID: "e1cbf25b-5855-4bdd-8b9a-af1d48ef038d"). InnerVolumeSpecName "kube-api-access-g9slf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.614302 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-config" (OuterVolumeSpecName: "config") pod "e1cbf25b-5855-4bdd-8b9a-af1d48ef038d" (UID: "e1cbf25b-5855-4bdd-8b9a-af1d48ef038d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.627674 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e1cbf25b-5855-4bdd-8b9a-af1d48ef038d" (UID: "e1cbf25b-5855-4bdd-8b9a-af1d48ef038d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.634779 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5821aceb-9d65-4e6f-940d-2a20117397f6-config" (OuterVolumeSpecName: "config") pod "5821aceb-9d65-4e6f-940d-2a20117397f6" (UID: "5821aceb-9d65-4e6f-940d-2a20117397f6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.635460 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5821aceb-9d65-4e6f-940d-2a20117397f6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5821aceb-9d65-4e6f-940d-2a20117397f6" (UID: "5821aceb-9d65-4e6f-940d-2a20117397f6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.644884 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-config\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.644916 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8sl4\" (UniqueName: \"kubernetes.io/projected/5821aceb-9d65-4e6f-940d-2a20117397f6-kube-api-access-g8sl4\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.644932 4881 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.644942 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9slf\" (UniqueName: \"kubernetes.io/projected/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d-kube-api-access-g9slf\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.644953 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5821aceb-9d65-4e6f-940d-2a20117397f6-config\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:22 crc kubenswrapper[4881]: I1211 08:36:22.644963 4881 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5821aceb-9d65-4e6f-940d-2a20117397f6-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:23 crc kubenswrapper[4881]: I1211 08:36:23.432470 4881 generic.go:334] "Generic (PLEG): container finished" podID="ed841687-cd89-4419-8726-85e086a5cc21" containerID="080f4092c28abc98f3cd45e00f973d275bf68faf0a43fb8d189c7bc8955176a2" exitCode=0
Dec 11 08:36:23 crc kubenswrapper[4881]: I1211 08:36:23.432787 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-5tjbs"
Dec 11 08:36:23 crc kubenswrapper[4881]: I1211 08:36:23.432843 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-5485d"
Dec 11 08:36:23 crc kubenswrapper[4881]: I1211 08:36:23.432533 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ed841687-cd89-4419-8726-85e086a5cc21","Type":"ContainerDied","Data":"080f4092c28abc98f3cd45e00f973d275bf68faf0a43fb8d189c7bc8955176a2"}
Dec 11 08:36:23 crc kubenswrapper[4881]: I1211 08:36:23.502966 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5485d"]
Dec 11 08:36:23 crc kubenswrapper[4881]: I1211 08:36:23.512025 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5485d"]
Dec 11 08:36:23 crc kubenswrapper[4881]: I1211 08:36:23.524476 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-5tjbs"]
Dec 11 08:36:23 crc kubenswrapper[4881]: I1211 08:36:23.533321 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-5tjbs"]
Dec 11 08:36:24 crc kubenswrapper[4881]: I1211 08:36:24.443554 4881 generic.go:334] "Generic (PLEG): container finished" podID="83d70e0f-d672-49c8-89d6-c1aa99c572a0" containerID="fad51349caab58607ba58815c5f2340b3cd292421c0d6836f33b9f343fcadef1" exitCode=0
Dec 11 08:36:24 crc kubenswrapper[4881]: I1211 08:36:24.443647 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"83d70e0f-d672-49c8-89d6-c1aa99c572a0","Type":"ContainerDied","Data":"fad51349caab58607ba58815c5f2340b3cd292421c0d6836f33b9f343fcadef1"}
Dec 11 08:36:24 crc kubenswrapper[4881]: I1211 08:36:24.449197 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ed841687-cd89-4419-8726-85e086a5cc21","Type":"ContainerStarted","Data":"b76ca59b759174d4b3b68dec243d1e27ea0c579c0ee348a6d617483e4567b5ce"}
Dec 11 08:36:24 crc kubenswrapper[4881]: I1211 08:36:24.449550 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Dec 11 08:36:24 crc kubenswrapper[4881]: I1211 08:36:24.521755 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.025746001 podStartE2EDuration="1m3.521738493s" podCreationTimestamp="2025-12-11 08:35:21 +0000 UTC" firstStartedPulling="2025-12-11 08:35:23.493604386 +0000 UTC m=+1171.870973083" lastFinishedPulling="2025-12-11 08:35:49.989596878 +0000 UTC m=+1198.366965575" observedRunningTime="2025-12-11 08:36:24.51364071 +0000 UTC m=+1232.891009427" watchObservedRunningTime="2025-12-11 08:36:24.521738493 +0000 UTC m=+1232.899107190"
Dec 11 08:36:24 crc kubenswrapper[4881]: I1211 08:36:24.705793 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Dec 11 08:36:24 crc kubenswrapper[4881]: I1211 08:36:24.705860 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.083854 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5821aceb-9d65-4e6f-940d-2a20117397f6" path="/var/lib/kubelet/pods/5821aceb-9d65-4e6f-940d-2a20117397f6/volumes"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.084992 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1cbf25b-5855-4bdd-8b9a-af1d48ef038d" path="/var/lib/kubelet/pods/e1cbf25b-5855-4bdd-8b9a-af1d48ef038d/volumes"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.408360 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.468490 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xfltd" event={"ID":"49d6015f-9f76-4e77-821e-2a11887e497c","Type":"ContainerStarted","Data":"c7bfa482293093dc0bdd363398338df0aa5757e7c89aa4f225823cc8ded80475"}
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.468715 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-xfltd"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.470889 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"83d70e0f-d672-49c8-89d6-c1aa99c572a0","Type":"ContainerStarted","Data":"9c1e8aa9aea1cf14c00df462fc461900deea0c524c22d4f6fe990e20562dd413"}
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.495348 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-xfltd" podStartSLOduration=20.004329832 podStartE2EDuration="53.495319443s" podCreationTimestamp="2025-12-11 08:35:32 +0000 UTC" firstStartedPulling="2025-12-11 08:35:51.103293959 +0000 UTC m=+1199.480662656" lastFinishedPulling="2025-12-11 08:36:24.59428357 +0000 UTC m=+1232.971652267" observedRunningTime="2025-12-11 08:36:25.486432143 +0000 UTC m=+1233.863800840" watchObservedRunningTime="2025-12-11 08:36:25.495319443 +0000 UTC m=+1233.872688150"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.514762 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.22093138 podStartE2EDuration="1m4.514743199s" podCreationTimestamp="2025-12-11 08:35:21 +0000 UTC" firstStartedPulling="2025-12-11 08:35:23.730498328 +0000 UTC m=+1172.107867025" lastFinishedPulling="2025-12-11 08:35:50.024310147 +0000 UTC m=+1198.401678844" observedRunningTime="2025-12-11 08:36:25.510858677 +0000 UTC m=+1233.888227374" watchObservedRunningTime="2025-12-11 08:36:25.514743199 +0000 UTC m=+1233.892111896"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.533619 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.801767 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-k7v7r"]
Dec 11 08:36:25 crc kubenswrapper[4881]: E1211 08:36:25.802254 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1cbf25b-5855-4bdd-8b9a-af1d48ef038d" containerName="dnsmasq-dns"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.802270 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1cbf25b-5855-4bdd-8b9a-af1d48ef038d" containerName="dnsmasq-dns"
Dec 11 08:36:25 crc kubenswrapper[4881]: E1211 08:36:25.802288 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5821aceb-9d65-4e6f-940d-2a20117397f6" containerName="dnsmasq-dns"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.802295 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="5821aceb-9d65-4e6f-940d-2a20117397f6" containerName="dnsmasq-dns"
Dec 11 08:36:25 crc kubenswrapper[4881]: E1211 08:36:25.802320 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1cbf25b-5855-4bdd-8b9a-af1d48ef038d" containerName="init"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.802328 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1cbf25b-5855-4bdd-8b9a-af1d48ef038d" containerName="init"
Dec 11 08:36:25 crc kubenswrapper[4881]: E1211 08:36:25.802378 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5821aceb-9d65-4e6f-940d-2a20117397f6" containerName="init"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.802388 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="5821aceb-9d65-4e6f-940d-2a20117397f6" containerName="init"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.802627 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1cbf25b-5855-4bdd-8b9a-af1d48ef038d" containerName="dnsmasq-dns"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.802645 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="5821aceb-9d65-4e6f-940d-2a20117397f6" containerName="dnsmasq-dns"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.803509 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-k7v7r"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.822930 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-k7v7r"]
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.826601 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxb77\" (UniqueName: \"kubernetes.io/projected/a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1-kube-api-access-gxb77\") pod \"keystone-db-create-k7v7r\" (UID: \"a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1\") " pod="openstack/keystone-db-create-k7v7r"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.929227 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxb77\" (UniqueName: \"kubernetes.io/projected/a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1-kube-api-access-gxb77\") pod \"keystone-db-create-k7v7r\" (UID: \"a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1\") " pod="openstack/keystone-db-create-k7v7r"
Dec 11 08:36:25 crc kubenswrapper[4881]: I1211 08:36:25.946857 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxb77\" (UniqueName: \"kubernetes.io/projected/a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1-kube-api-access-gxb77\") pod \"keystone-db-create-k7v7r\" (UID: \"a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1\") " pod="openstack/keystone-db-create-k7v7r"
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.011766 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-69ddbf8769-4wnph" podUID="05fbeafe-b228-42a0-8cfa-dc070853c0d7" containerName="console" containerID="cri-o://4e41a6248105052d2b5a161cd418325715f0a7ff208cc2c03c4b50e6266fb0db" gracePeriod=15
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.031711 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-86k8z"]
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.033067 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-86k8z"
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.040779 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-86k8z"]
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.123856 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-k7v7r"
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.136155 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpb68\" (UniqueName: \"kubernetes.io/projected/cb33f8d2-c5b8-485b-8be2-505ba688ddc0-kube-api-access-lpb68\") pod \"placement-db-create-86k8z\" (UID: \"cb33f8d2-c5b8-485b-8be2-505ba688ddc0\") " pod="openstack/placement-db-create-86k8z"
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.238188 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpb68\" (UniqueName: \"kubernetes.io/projected/cb33f8d2-c5b8-485b-8be2-505ba688ddc0-kube-api-access-lpb68\") pod \"placement-db-create-86k8z\" (UID: \"cb33f8d2-c5b8-485b-8be2-505ba688ddc0\") " pod="openstack/placement-db-create-86k8z"
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.295977 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpb68\" (UniqueName: \"kubernetes.io/projected/cb33f8d2-c5b8-485b-8be2-505ba688ddc0-kube-api-access-lpb68\") pod \"placement-db-create-86k8z\" (UID: \"cb33f8d2-c5b8-485b-8be2-505ba688ddc0\") " pod="openstack/placement-db-create-86k8z"
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.337064 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-9pp5h"]
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.338301 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-9pp5h"
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.353494 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-9pp5h"]
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.441113 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfzb9\" (UniqueName: \"kubernetes.io/projected/574b7634-a0fa-484a-b0c5-4f1a6541d3a9-kube-api-access-cfzb9\") pod \"glance-db-create-9pp5h\" (UID: \"574b7634-a0fa-484a-b0c5-4f1a6541d3a9\") " pod="openstack/glance-db-create-9pp5h"
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.504755 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-69ddbf8769-4wnph_05fbeafe-b228-42a0-8cfa-dc070853c0d7/console/0.log"
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.504804 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-86k8z"
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.504827 4881 generic.go:334] "Generic (PLEG): container finished" podID="05fbeafe-b228-42a0-8cfa-dc070853c0d7" containerID="4e41a6248105052d2b5a161cd418325715f0a7ff208cc2c03c4b50e6266fb0db" exitCode=2
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.504902 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-69ddbf8769-4wnph" event={"ID":"05fbeafe-b228-42a0-8cfa-dc070853c0d7","Type":"ContainerDied","Data":"4e41a6248105052d2b5a161cd418325715f0a7ff208cc2c03c4b50e6266fb0db"}
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.508076 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"558f097f-277d-4824-bafc-28c4c0f139f3","Type":"ContainerStarted","Data":"6f5ccd333037555a031831817d11ecc7d4a25ca7cf05d3819deda856ae8cdce8"}
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.508974 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.531139 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=25.112163397 podStartE2EDuration="59.531120763s" podCreationTimestamp="2025-12-11 08:35:27 +0000 UTC" firstStartedPulling="2025-12-11 08:35:51.009670344 +0000 UTC m=+1199.387039041" lastFinishedPulling="2025-12-11 08:36:25.42862771 +0000 UTC m=+1233.805996407" observedRunningTime="2025-12-11 08:36:26.522406857 +0000 UTC m=+1234.899775564" watchObservedRunningTime="2025-12-11 08:36:26.531120763 +0000 UTC m=+1234.908489460"
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.547525 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfzb9\" (UniqueName: \"kubernetes.io/projected/574b7634-a0fa-484a-b0c5-4f1a6541d3a9-kube-api-access-cfzb9\") pod \"glance-db-create-9pp5h\" (UID: \"574b7634-a0fa-484a-b0c5-4f1a6541d3a9\") " pod="openstack/glance-db-create-9pp5h"
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.593070 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfzb9\" (UniqueName: \"kubernetes.io/projected/574b7634-a0fa-484a-b0c5-4f1a6541d3a9-kube-api-access-cfzb9\") pod \"glance-db-create-9pp5h\" (UID: \"574b7634-a0fa-484a-b0c5-4f1a6541d3a9\") " pod="openstack/glance-db-create-9pp5h"
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.664311 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-9pp5h"
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.772248 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-k7v7r"]
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.792872 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-69ddbf8769-4wnph_05fbeafe-b228-42a0-8cfa-dc070853c0d7/console/0.log"
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.792965 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-69ddbf8769-4wnph"
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.855976 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-config\") pod \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") "
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.856015 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-trusted-ca-bundle\") pod \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") "
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.856063 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-serving-cert\") pod \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") "
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.856151 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-service-ca\") pod \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") "
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.856282 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-oauth-serving-cert\") pod \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") "
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.856583 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-slhgb\" (UniqueName: \"kubernetes.io/projected/05fbeafe-b228-42a0-8cfa-dc070853c0d7-kube-api-access-slhgb\") pod \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") "
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.856612 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-oauth-config\") pod \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\" (UID: \"05fbeafe-b228-42a0-8cfa-dc070853c0d7\") "
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.858267 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "05fbeafe-b228-42a0-8cfa-dc070853c0d7" (UID: "05fbeafe-b228-42a0-8cfa-dc070853c0d7"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.858628 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-service-ca" (OuterVolumeSpecName: "service-ca") pod "05fbeafe-b228-42a0-8cfa-dc070853c0d7" (UID: "05fbeafe-b228-42a0-8cfa-dc070853c0d7"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.858935 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-config" (OuterVolumeSpecName: "console-config") pod "05fbeafe-b228-42a0-8cfa-dc070853c0d7" (UID: "05fbeafe-b228-42a0-8cfa-dc070853c0d7"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.859277 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "05fbeafe-b228-42a0-8cfa-dc070853c0d7" (UID: "05fbeafe-b228-42a0-8cfa-dc070853c0d7"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.862331 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "05fbeafe-b228-42a0-8cfa-dc070853c0d7" (UID: "05fbeafe-b228-42a0-8cfa-dc070853c0d7"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.865692 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "05fbeafe-b228-42a0-8cfa-dc070853c0d7" (UID: "05fbeafe-b228-42a0-8cfa-dc070853c0d7"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.868393 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05fbeafe-b228-42a0-8cfa-dc070853c0d7-kube-api-access-slhgb" (OuterVolumeSpecName: "kube-api-access-slhgb") pod "05fbeafe-b228-42a0-8cfa-dc070853c0d7" (UID: "05fbeafe-b228-42a0-8cfa-dc070853c0d7"). InnerVolumeSpecName "kube-api-access-slhgb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.960075 4881 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-config\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.960103 4881 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.960113 4881 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.960122 4881 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-service-ca\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.960131 4881 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/05fbeafe-b228-42a0-8cfa-dc070853c0d7-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.960139 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-slhgb\" (UniqueName: \"kubernetes.io/projected/05fbeafe-b228-42a0-8cfa-dc070853c0d7-kube-api-access-slhgb\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:26 crc kubenswrapper[4881]: I1211 08:36:26.960148 4881 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/05fbeafe-b228-42a0-8cfa-dc070853c0d7-console-oauth-config\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.126154 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-86k8z"]
Dec 11 08:36:27 crc kubenswrapper[4881]: W1211 08:36:27.227449 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod574b7634_a0fa_484a_b0c5_4f1a6541d3a9.slice/crio-bc47357d72b135bee51285aff2faea94221b8d69fae97eb69fcb93639aab67cf WatchSource:0}: Error finding container bc47357d72b135bee51285aff2faea94221b8d69fae97eb69fcb93639aab67cf: Status 404 returned error can't find the container with id bc47357d72b135bee51285aff2faea94221b8d69fae97eb69fcb93639aab67cf
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.229397 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-9pp5h"]
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.519741 4881 generic.go:334] "Generic (PLEG): container finished" podID="a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1" containerID="ad72b995d12f058193d8a2117a39ff0cde2f61b432a29b220d444b2bebeeec8d" exitCode=0
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.519931 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-k7v7r" event={"ID":"a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1","Type":"ContainerDied","Data":"ad72b995d12f058193d8a2117a39ff0cde2f61b432a29b220d444b2bebeeec8d"}
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.520171 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-k7v7r" event={"ID":"a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1","Type":"ContainerStarted","Data":"d69a9e6ba30f0193067b417220bdc9e26dd531958c6bf26f75162082d3efc779"}
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.521779 4881 generic.go:334] "Generic (PLEG): container finished" podID="cb33f8d2-c5b8-485b-8be2-505ba688ddc0" containerID="eee8c8fc77d74034183d1d910210913c2c8ccb0520b46f7e04968231b146b925" exitCode=0
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.521846 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-86k8z" event={"ID":"cb33f8d2-c5b8-485b-8be2-505ba688ddc0","Type":"ContainerDied","Data":"eee8c8fc77d74034183d1d910210913c2c8ccb0520b46f7e04968231b146b925"}
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.521869 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-86k8z" event={"ID":"cb33f8d2-c5b8-485b-8be2-505ba688ddc0","Type":"ContainerStarted","Data":"e26c3a93b8aec8ac54f67e8a9301b902a80b7a1ef47b226233c4aa0b1af253f5"}
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.523206 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-69ddbf8769-4wnph_05fbeafe-b228-42a0-8cfa-dc070853c0d7/console/0.log"
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.523272 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-69ddbf8769-4wnph" event={"ID":"05fbeafe-b228-42a0-8cfa-dc070853c0d7","Type":"ContainerDied","Data":"6cac496caddbe4caba5d4512edc18c308c126297080e4e7856fda80a28de0d48"}
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.523303 4881 scope.go:117] "RemoveContainer" containerID="4e41a6248105052d2b5a161cd418325715f0a7ff208cc2c03c4b50e6266fb0db"
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.523419 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-69ddbf8769-4wnph"
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.525661 4881 generic.go:334] "Generic (PLEG): container finished" podID="d40e3cbd-c017-4b42-94ee-dea2565d55a3" containerID="5e44991ce8c87019f1fa3dc8bcbae2bf02e6db132cee4406f0ce0c273f1f447e" exitCode=0
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.525701 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-qt8lj" event={"ID":"d40e3cbd-c017-4b42-94ee-dea2565d55a3","Type":"ContainerDied","Data":"5e44991ce8c87019f1fa3dc8bcbae2bf02e6db132cee4406f0ce0c273f1f447e"}
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.528903 4881 generic.go:334] "Generic (PLEG): container finished" podID="574b7634-a0fa-484a-b0c5-4f1a6541d3a9" containerID="dce9f57d92ae5197a26df3a8ef60dbb7bcef7b674c984fbf3c2d26b3a31e6a2f" exitCode=0
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.529612 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-9pp5h" event={"ID":"574b7634-a0fa-484a-b0c5-4f1a6541d3a9","Type":"ContainerDied","Data":"dce9f57d92ae5197a26df3a8ef60dbb7bcef7b674c984fbf3c2d26b3a31e6a2f"}
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.529637 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-9pp5h" event={"ID":"574b7634-a0fa-484a-b0c5-4f1a6541d3a9","Type":"ContainerStarted","Data":"bc47357d72b135bee51285aff2faea94221b8d69fae97eb69fcb93639aab67cf"}
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.584860 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-69ddbf8769-4wnph"]
Dec 11 08:36:27 crc kubenswrapper[4881]: I1211 08:36:27.600544 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-69ddbf8769-4wnph"]
Dec 11 08:36:28 crc kubenswrapper[4881]: I1211 08:36:28.254026 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-grwv8"]
Dec 11 08:36:28 crc kubenswrapper[4881]: E1211 08:36:28.254708 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05fbeafe-b228-42a0-8cfa-dc070853c0d7" containerName="console"
Dec 11 08:36:28 crc kubenswrapper[4881]: I1211 08:36:28.254820 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="05fbeafe-b228-42a0-8cfa-dc070853c0d7" containerName="console"
Dec 11 08:36:28 crc kubenswrapper[4881]: I1211 08:36:28.255079 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="05fbeafe-b228-42a0-8cfa-dc070853c0d7" containerName="console"
Dec 11 08:36:28 crc kubenswrapper[4881]: I1211 08:36:28.255888 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-grwv8"
Dec 11 08:36:28 crc kubenswrapper[4881]: I1211 08:36:28.266599 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-grwv8"]
Dec 11 08:36:28 crc kubenswrapper[4881]: I1211 08:36:28.287705 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndq8z\" (UniqueName: \"kubernetes.io/projected/05cca6b4-3f02-4baf-8b58-3b7de77c5ec3-kube-api-access-ndq8z\") pod \"mysqld-exporter-openstack-db-create-grwv8\" (UID: \"05cca6b4-3f02-4baf-8b58-3b7de77c5ec3\") " pod="openstack/mysqld-exporter-openstack-db-create-grwv8"
Dec 11 08:36:28 crc kubenswrapper[4881]: I1211 08:36:28.389669 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndq8z\" (UniqueName: \"kubernetes.io/projected/05cca6b4-3f02-4baf-8b58-3b7de77c5ec3-kube-api-access-ndq8z\") pod \"mysqld-exporter-openstack-db-create-grwv8\" (UID: \"05cca6b4-3f02-4baf-8b58-3b7de77c5ec3\") " pod="openstack/mysqld-exporter-openstack-db-create-grwv8"
Dec 11 08:36:28 crc kubenswrapper[4881]: I1211 08:36:28.414006 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndq8z\" (UniqueName: \"kubernetes.io/projected/05cca6b4-3f02-4baf-8b58-3b7de77c5ec3-kube-api-access-ndq8z\") pod \"mysqld-exporter-openstack-db-create-grwv8\" (UID: \"05cca6b4-3f02-4baf-8b58-3b7de77c5ec3\") " pod="openstack/mysqld-exporter-openstack-db-create-grwv8"
Dec 11 08:36:28 crc kubenswrapper[4881]: I1211 08:36:28.583544 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-grwv8"
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.018092 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05fbeafe-b228-42a0-8cfa-dc070853c0d7" path="/var/lib/kubelet/pods/05fbeafe-b228-42a0-8cfa-dc070853c0d7/volumes"
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.236423 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-grwv8"]
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.284210 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-9pp5h"
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.308047 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfzb9\" (UniqueName: \"kubernetes.io/projected/574b7634-a0fa-484a-b0c5-4f1a6541d3a9-kube-api-access-cfzb9\") pod \"574b7634-a0fa-484a-b0c5-4f1a6541d3a9\" (UID: \"574b7634-a0fa-484a-b0c5-4f1a6541d3a9\") "
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.309200 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-qt8lj"
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.317024 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-86k8z"
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.321221 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/574b7634-a0fa-484a-b0c5-4f1a6541d3a9-kube-api-access-cfzb9" (OuterVolumeSpecName: "kube-api-access-cfzb9") pod "574b7634-a0fa-484a-b0c5-4f1a6541d3a9" (UID: "574b7634-a0fa-484a-b0c5-4f1a6541d3a9"). InnerVolumeSpecName "kube-api-access-cfzb9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.370051 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-k7v7r"
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.397243 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.397309 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.409894 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-combined-ca-bundle\") pod \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") "
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.410008 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d40e3cbd-c017-4b42-94ee-dea2565d55a3-ring-data-devices\") pod \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") "
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.410073 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-swiftconf\") pod \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") "
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.410116 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxb77\" (UniqueName: \"kubernetes.io/projected/a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1-kube-api-access-gxb77\") pod \"a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1\" (UID: \"a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1\") "
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.410142 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-dispersionconf\") pod \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") "
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.410241 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d40e3cbd-c017-4b42-94ee-dea2565d55a3-etc-swift\") pod \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") "
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.410308 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76mk6\" (UniqueName: \"kubernetes.io/projected/d40e3cbd-c017-4b42-94ee-dea2565d55a3-kube-api-access-76mk6\") pod \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") "
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.410355 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lpb68\" (UniqueName: \"kubernetes.io/projected/cb33f8d2-c5b8-485b-8be2-505ba688ddc0-kube-api-access-lpb68\") pod \"cb33f8d2-c5b8-485b-8be2-505ba688ddc0\" (UID: \"cb33f8d2-c5b8-485b-8be2-505ba688ddc0\") "
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.410443 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d40e3cbd-c017-4b42-94ee-dea2565d55a3-scripts\") pod \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\" (UID: \"d40e3cbd-c017-4b42-94ee-dea2565d55a3\") "
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.410720 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d40e3cbd-c017-4b42-94ee-dea2565d55a3-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "d40e3cbd-c017-4b42-94ee-dea2565d55a3" (UID: "d40e3cbd-c017-4b42-94ee-dea2565d55a3"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.411736 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d40e3cbd-c017-4b42-94ee-dea2565d55a3-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "d40e3cbd-c017-4b42-94ee-dea2565d55a3" (UID: "d40e3cbd-c017-4b42-94ee-dea2565d55a3"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.412712 4881 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d40e3cbd-c017-4b42-94ee-dea2565d55a3-etc-swift\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.412734 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfzb9\" (UniqueName: \"kubernetes.io/projected/574b7634-a0fa-484a-b0c5-4f1a6541d3a9-kube-api-access-cfzb9\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.412747 4881 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d40e3cbd-c017-4b42-94ee-dea2565d55a3-ring-data-devices\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.416737 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1-kube-api-access-gxb77" (OuterVolumeSpecName: "kube-api-access-gxb77") pod "a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1" (UID: "a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1"). InnerVolumeSpecName "kube-api-access-gxb77". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.417214 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d40e3cbd-c017-4b42-94ee-dea2565d55a3-kube-api-access-76mk6" (OuterVolumeSpecName: "kube-api-access-76mk6") pod "d40e3cbd-c017-4b42-94ee-dea2565d55a3" (UID: "d40e3cbd-c017-4b42-94ee-dea2565d55a3"). InnerVolumeSpecName "kube-api-access-76mk6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.417262 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb33f8d2-c5b8-485b-8be2-505ba688ddc0-kube-api-access-lpb68" (OuterVolumeSpecName: "kube-api-access-lpb68") pod "cb33f8d2-c5b8-485b-8be2-505ba688ddc0" (UID: "cb33f8d2-c5b8-485b-8be2-505ba688ddc0"). InnerVolumeSpecName "kube-api-access-lpb68". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.420032 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "d40e3cbd-c017-4b42-94ee-dea2565d55a3" (UID: "d40e3cbd-c017-4b42-94ee-dea2565d55a3"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.437114 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d40e3cbd-c017-4b42-94ee-dea2565d55a3-scripts" (OuterVolumeSpecName: "scripts") pod "d40e3cbd-c017-4b42-94ee-dea2565d55a3" (UID: "d40e3cbd-c017-4b42-94ee-dea2565d55a3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.441866 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d40e3cbd-c017-4b42-94ee-dea2565d55a3" (UID: "d40e3cbd-c017-4b42-94ee-dea2565d55a3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.448716 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "d40e3cbd-c017-4b42-94ee-dea2565d55a3" (UID: "d40e3cbd-c017-4b42-94ee-dea2565d55a3"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.514062 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76mk6\" (UniqueName: \"kubernetes.io/projected/d40e3cbd-c017-4b42-94ee-dea2565d55a3-kube-api-access-76mk6\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.514100 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lpb68\" (UniqueName: \"kubernetes.io/projected/cb33f8d2-c5b8-485b-8be2-505ba688ddc0-kube-api-access-lpb68\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.514112 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d40e3cbd-c017-4b42-94ee-dea2565d55a3-scripts\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.514124 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.514136 4881 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-swiftconf\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.514148 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxb77\" (UniqueName: \"kubernetes.io/projected/a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1-kube-api-access-gxb77\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.514158 4881 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d40e3cbd-c017-4b42-94ee-dea2565d55a3-dispersionconf\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.547660 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-k7v7r" event={"ID":"a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1","Type":"ContainerDied","Data":"d69a9e6ba30f0193067b417220bdc9e26dd531958c6bf26f75162082d3efc779"}
Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.547678 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-k7v7r" Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.547943 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d69a9e6ba30f0193067b417220bdc9e26dd531958c6bf26f75162082d3efc779" Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.549031 4881 generic.go:334] "Generic (PLEG): container finished" podID="05cca6b4-3f02-4baf-8b58-3b7de77c5ec3" containerID="182269b56b596c919a0f1fabff20d956dfda7c8f13eb2ead3394c9420f9c24f4" exitCode=0 Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.549104 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-grwv8" event={"ID":"05cca6b4-3f02-4baf-8b58-3b7de77c5ec3","Type":"ContainerDied","Data":"182269b56b596c919a0f1fabff20d956dfda7c8f13eb2ead3394c9420f9c24f4"} Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.549140 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-grwv8" event={"ID":"05cca6b4-3f02-4baf-8b58-3b7de77c5ec3","Type":"ContainerStarted","Data":"92980818082ab2f5718218e382b638cad7ee81fde636b6c731b4f180ab6ab1de"} Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.550717 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-86k8z" event={"ID":"cb33f8d2-c5b8-485b-8be2-505ba688ddc0","Type":"ContainerDied","Data":"e26c3a93b8aec8ac54f67e8a9301b902a80b7a1ef47b226233c4aa0b1af253f5"} Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.550738 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-86k8z" Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.550741 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e26c3a93b8aec8ac54f67e8a9301b902a80b7a1ef47b226233c4aa0b1af253f5" Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.554215 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-qt8lj" event={"ID":"d40e3cbd-c017-4b42-94ee-dea2565d55a3","Type":"ContainerDied","Data":"7387deb0c9b28eaaa6c529f683a2d563ef404ab7ae77646c6e6bb0c763c157b9"} Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.554244 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7387deb0c9b28eaaa6c529f683a2d563ef404ab7ae77646c6e6bb0c763c157b9" Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.554319 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-qt8lj" Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.558022 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-9pp5h" event={"ID":"574b7634-a0fa-484a-b0c5-4f1a6541d3a9","Type":"ContainerDied","Data":"bc47357d72b135bee51285aff2faea94221b8d69fae97eb69fcb93639aab67cf"} Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.558066 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc47357d72b135bee51285aff2faea94221b8d69fae97eb69fcb93639aab67cf" Dec 11 08:36:29 crc kubenswrapper[4881]: I1211 08:36:29.558290 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-9pp5h" Dec 11 08:36:30 crc kubenswrapper[4881]: I1211 08:36:30.818514 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" Dec 11 08:36:30 crc kubenswrapper[4881]: I1211 08:36:30.920451 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-grwv8" Dec 11 08:36:30 crc kubenswrapper[4881]: I1211 08:36:30.941489 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndq8z\" (UniqueName: \"kubernetes.io/projected/05cca6b4-3f02-4baf-8b58-3b7de77c5ec3-kube-api-access-ndq8z\") pod \"05cca6b4-3f02-4baf-8b58-3b7de77c5ec3\" (UID: \"05cca6b4-3f02-4baf-8b58-3b7de77c5ec3\") " Dec 11 08:36:30 crc kubenswrapper[4881]: I1211 08:36:30.949696 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05cca6b4-3f02-4baf-8b58-3b7de77c5ec3-kube-api-access-ndq8z" (OuterVolumeSpecName: "kube-api-access-ndq8z") pod "05cca6b4-3f02-4baf-8b58-3b7de77c5ec3" (UID: "05cca6b4-3f02-4baf-8b58-3b7de77c5ec3"). InnerVolumeSpecName "kube-api-access-ndq8z". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:36:31 crc kubenswrapper[4881]: I1211 08:36:31.000504 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-mrlfq" Dec 11 08:36:31 crc kubenswrapper[4881]: I1211 08:36:31.045287 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ndq8z\" (UniqueName: \"kubernetes.io/projected/05cca6b4-3f02-4baf-8b58-3b7de77c5ec3-kube-api-access-ndq8z\") on node \"crc\" DevicePath \"\"" Dec 11 08:36:31 crc kubenswrapper[4881]: I1211 08:36:31.072854 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f6f696b9-kfh6q"] Dec 11 08:36:31 crc kubenswrapper[4881]: I1211 08:36:31.453319 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:36:31 crc kubenswrapper[4881]: I1211 08:36:31.458696 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1af483c2-ea3f-45cd-971d-797c06f5c6e2-etc-swift\") pod \"swift-storage-0\" (UID: \"1af483c2-ea3f-45cd-971d-797c06f5c6e2\") " pod="openstack/swift-storage-0" Dec 11 08:36:31 crc kubenswrapper[4881]: I1211 08:36:31.575286 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-grwv8" event={"ID":"05cca6b4-3f02-4baf-8b58-3b7de77c5ec3","Type":"ContainerDied","Data":"92980818082ab2f5718218e382b638cad7ee81fde636b6c731b4f180ab6ab1de"} Dec 11 08:36:31 crc kubenswrapper[4881]: I1211 08:36:31.575365 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="92980818082ab2f5718218e382b638cad7ee81fde636b6c731b4f180ab6ab1de" Dec 11 08:36:31 crc kubenswrapper[4881]: I1211 08:36:31.575314 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-grwv8" Dec 11 08:36:31 crc kubenswrapper[4881]: I1211 08:36:31.575443 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" podUID="0586421b-6bd9-4994-a95d-fbe21a506e46" containerName="dnsmasq-dns" containerID="cri-o://0cd756bbcccd27b2b38b0b92093d46082c45e1cd37e7e18f1b573fbae37584bc" gracePeriod=10 Dec 11 08:36:31 crc kubenswrapper[4881]: I1211 08:36:31.671513 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.135583 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.168010 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-dns-svc\") pod \"0586421b-6bd9-4994-a95d-fbe21a506e46\" (UID: \"0586421b-6bd9-4994-a95d-fbe21a506e46\") " Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.168236 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-ovsdbserver-nb\") pod \"0586421b-6bd9-4994-a95d-fbe21a506e46\" (UID: \"0586421b-6bd9-4994-a95d-fbe21a506e46\") " Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.168292 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-config\") pod \"0586421b-6bd9-4994-a95d-fbe21a506e46\" (UID: \"0586421b-6bd9-4994-a95d-fbe21a506e46\") " Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.168327 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wx84k\" (UniqueName: \"kubernetes.io/projected/0586421b-6bd9-4994-a95d-fbe21a506e46-kube-api-access-wx84k\") pod \"0586421b-6bd9-4994-a95d-fbe21a506e46\" (UID: \"0586421b-6bd9-4994-a95d-fbe21a506e46\") " Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.182698 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0586421b-6bd9-4994-a95d-fbe21a506e46-kube-api-access-wx84k" (OuterVolumeSpecName: "kube-api-access-wx84k") pod "0586421b-6bd9-4994-a95d-fbe21a506e46" (UID: "0586421b-6bd9-4994-a95d-fbe21a506e46"). InnerVolumeSpecName "kube-api-access-wx84k". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.235739 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0586421b-6bd9-4994-a95d-fbe21a506e46" (UID: "0586421b-6bd9-4994-a95d-fbe21a506e46"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.235802 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0586421b-6bd9-4994-a95d-fbe21a506e46" (UID: "0586421b-6bd9-4994-a95d-fbe21a506e46"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.241163 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-config" (OuterVolumeSpecName: "config") pod "0586421b-6bd9-4994-a95d-fbe21a506e46" (UID: "0586421b-6bd9-4994-a95d-fbe21a506e46"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.270735 4881 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.270767 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.270777 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0586421b-6bd9-4994-a95d-fbe21a506e46-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.270786 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wx84k\" (UniqueName: \"kubernetes.io/projected/0586421b-6bd9-4994-a95d-fbe21a506e46-kube-api-access-wx84k\") on node \"crc\" DevicePath \"\"" Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.329717 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Dec 11 08:36:32 crc kubenswrapper[4881]: W1211 08:36:32.331686 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1af483c2_ea3f_45cd_971d_797c06f5c6e2.slice/crio-501f77756c0fa548c1b77abe69bd4a4017a1d91c343cdcdc069665fd77e8c018 WatchSource:0}: Error finding container 501f77756c0fa548c1b77abe69bd4a4017a1d91c343cdcdc069665fd77e8c018: Status 404 returned error can't find the container with id 501f77756c0fa548c1b77abe69bd4a4017a1d91c343cdcdc069665fd77e8c018 Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.592187 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1af483c2-ea3f-45cd-971d-797c06f5c6e2","Type":"ContainerStarted","Data":"501f77756c0fa548c1b77abe69bd4a4017a1d91c343cdcdc069665fd77e8c018"} Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.595891 4881 generic.go:334] "Generic (PLEG): container finished" podID="0586421b-6bd9-4994-a95d-fbe21a506e46" containerID="0cd756bbcccd27b2b38b0b92093d46082c45e1cd37e7e18f1b573fbae37584bc" exitCode=0 Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.595935 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" event={"ID":"0586421b-6bd9-4994-a95d-fbe21a506e46","Type":"ContainerDied","Data":"0cd756bbcccd27b2b38b0b92093d46082c45e1cd37e7e18f1b573fbae37584bc"} Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.595964 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" event={"ID":"0586421b-6bd9-4994-a95d-fbe21a506e46","Type":"ContainerDied","Data":"00dff6da7f0a4fb9ad98a946fd4333aeb7ade903fd88fbc2338d4a05b3acff63"} Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.595979 4881 scope.go:117] "RemoveContainer" 
containerID="0cd756bbcccd27b2b38b0b92093d46082c45e1cd37e7e18f1b573fbae37584bc" Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.596274 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6f696b9-kfh6q" Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.622381 4881 scope.go:117] "RemoveContainer" containerID="44ca4b39d2c447fa4b2810ef15cd2637326ea68559aaa20cdf2bac5ee4290fd6" Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.632909 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f6f696b9-kfh6q"] Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.643085 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-74f6f696b9-kfh6q"] Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.650929 4881 scope.go:117] "RemoveContainer" containerID="0cd756bbcccd27b2b38b0b92093d46082c45e1cd37e7e18f1b573fbae37584bc" Dec 11 08:36:32 crc kubenswrapper[4881]: E1211 08:36:32.651591 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0cd756bbcccd27b2b38b0b92093d46082c45e1cd37e7e18f1b573fbae37584bc\": container with ID starting with 0cd756bbcccd27b2b38b0b92093d46082c45e1cd37e7e18f1b573fbae37584bc not found: ID does not exist" containerID="0cd756bbcccd27b2b38b0b92093d46082c45e1cd37e7e18f1b573fbae37584bc" Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.651640 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0cd756bbcccd27b2b38b0b92093d46082c45e1cd37e7e18f1b573fbae37584bc"} err="failed to get container status \"0cd756bbcccd27b2b38b0b92093d46082c45e1cd37e7e18f1b573fbae37584bc\": rpc error: code = NotFound desc = could not find container \"0cd756bbcccd27b2b38b0b92093d46082c45e1cd37e7e18f1b573fbae37584bc\": container with ID starting with 0cd756bbcccd27b2b38b0b92093d46082c45e1cd37e7e18f1b573fbae37584bc not found: ID does not exist" Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.651671 4881 scope.go:117] "RemoveContainer" containerID="44ca4b39d2c447fa4b2810ef15cd2637326ea68559aaa20cdf2bac5ee4290fd6" Dec 11 08:36:32 crc kubenswrapper[4881]: E1211 08:36:32.652119 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44ca4b39d2c447fa4b2810ef15cd2637326ea68559aaa20cdf2bac5ee4290fd6\": container with ID starting with 44ca4b39d2c447fa4b2810ef15cd2637326ea68559aaa20cdf2bac5ee4290fd6 not found: ID does not exist" containerID="44ca4b39d2c447fa4b2810ef15cd2637326ea68559aaa20cdf2bac5ee4290fd6" Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.652157 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44ca4b39d2c447fa4b2810ef15cd2637326ea68559aaa20cdf2bac5ee4290fd6"} err="failed to get container status \"44ca4b39d2c447fa4b2810ef15cd2637326ea68559aaa20cdf2bac5ee4290fd6\": rpc error: code = NotFound desc = could not find container \"44ca4b39d2c447fa4b2810ef15cd2637326ea68559aaa20cdf2bac5ee4290fd6\": container with ID starting with 44ca4b39d2c447fa4b2810ef15cd2637326ea68559aaa20cdf2bac5ee4290fd6 not found: ID does not exist" Dec 11 08:36:32 crc kubenswrapper[4881]: I1211 08:36:32.936538 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:36:33 crc kubenswrapper[4881]: I1211 08:36:33.016224 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="0586421b-6bd9-4994-a95d-fbe21a506e46" path="/var/lib/kubelet/pods/0586421b-6bd9-4994-a95d-fbe21a506e46/volumes" Dec 11 08:36:34 crc kubenswrapper[4881]: I1211 08:36:34.615571 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1af483c2-ea3f-45cd-971d-797c06f5c6e2","Type":"ContainerStarted","Data":"546be47b0c4e1657a96417def1ec18088b73ae6ff1a54684b99d0874a2b8c6f0"} Dec 11 08:36:34 crc kubenswrapper[4881]: I1211 08:36:34.616212 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1af483c2-ea3f-45cd-971d-797c06f5c6e2","Type":"ContainerStarted","Data":"ff745c22a810299e43044c549116fccfbdd50901e206cb2c528735fc5a951120"} Dec 11 08:36:34 crc kubenswrapper[4881]: I1211 08:36:34.616226 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1af483c2-ea3f-45cd-971d-797c06f5c6e2","Type":"ContainerStarted","Data":"2d12dd4889b363717588e20d3cc97072079818040b308a5f99e9bfd1c6c44d8a"} Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.628321 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1af483c2-ea3f-45cd-971d-797c06f5c6e2","Type":"ContainerStarted","Data":"668ac1b72c9c1b8c4ddcec17dffb49145cecdec2febd11b761957424a4caa969"} Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.843718 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-ee83-account-create-7zk6g"] Dec 11 08:36:35 crc kubenswrapper[4881]: E1211 08:36:35.855900 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="574b7634-a0fa-484a-b0c5-4f1a6541d3a9" containerName="mariadb-database-create" Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.855938 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="574b7634-a0fa-484a-b0c5-4f1a6541d3a9" containerName="mariadb-database-create" Dec 11 08:36:35 crc kubenswrapper[4881]: E1211 08:36:35.855957 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0586421b-6bd9-4994-a95d-fbe21a506e46" containerName="init" Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.855964 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="0586421b-6bd9-4994-a95d-fbe21a506e46" containerName="init" Dec 11 08:36:35 crc kubenswrapper[4881]: E1211 08:36:35.855976 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb33f8d2-c5b8-485b-8be2-505ba688ddc0" containerName="mariadb-database-create" Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.855983 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb33f8d2-c5b8-485b-8be2-505ba688ddc0" containerName="mariadb-database-create" Dec 11 08:36:35 crc kubenswrapper[4881]: E1211 08:36:35.855993 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05cca6b4-3f02-4baf-8b58-3b7de77c5ec3" containerName="mariadb-database-create" Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.856000 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="05cca6b4-3f02-4baf-8b58-3b7de77c5ec3" containerName="mariadb-database-create" Dec 11 08:36:35 crc kubenswrapper[4881]: E1211 08:36:35.856008 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0586421b-6bd9-4994-a95d-fbe21a506e46" containerName="dnsmasq-dns" Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.856014 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="0586421b-6bd9-4994-a95d-fbe21a506e46" containerName="dnsmasq-dns" Dec 11 08:36:35 crc kubenswrapper[4881]: E1211 
08:36:35.856037 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1" containerName="mariadb-database-create" Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.856044 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1" containerName="mariadb-database-create" Dec 11 08:36:35 crc kubenswrapper[4881]: E1211 08:36:35.856077 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d40e3cbd-c017-4b42-94ee-dea2565d55a3" containerName="swift-ring-rebalance" Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.856169 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="d40e3cbd-c017-4b42-94ee-dea2565d55a3" containerName="swift-ring-rebalance" Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.856636 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="574b7634-a0fa-484a-b0c5-4f1a6541d3a9" containerName="mariadb-database-create" Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.856677 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="05cca6b4-3f02-4baf-8b58-3b7de77c5ec3" containerName="mariadb-database-create" Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.856700 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="0586421b-6bd9-4994-a95d-fbe21a506e46" containerName="dnsmasq-dns" Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.856720 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="d40e3cbd-c017-4b42-94ee-dea2565d55a3" containerName="swift-ring-rebalance" Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.856741 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb33f8d2-c5b8-485b-8be2-505ba688ddc0" containerName="mariadb-database-create" Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.856749 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1" containerName="mariadb-database-create" Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.858056 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-ee83-account-create-7zk6g"] Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.858169 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-ee83-account-create-7zk6g" Dec 11 08:36:35 crc kubenswrapper[4881]: I1211 08:36:35.860004 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Dec 11 08:36:36 crc kubenswrapper[4881]: I1211 08:36:36.051540 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppb5c\" (UniqueName: \"kubernetes.io/projected/73794710-ff03-415b-b6f7-48e86190e910-kube-api-access-ppb5c\") pod \"keystone-ee83-account-create-7zk6g\" (UID: \"73794710-ff03-415b-b6f7-48e86190e910\") " pod="openstack/keystone-ee83-account-create-7zk6g" Dec 11 08:36:36 crc kubenswrapper[4881]: I1211 08:36:36.151202 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-9bb0-account-create-8rxjm"] Dec 11 08:36:36 crc kubenswrapper[4881]: I1211 08:36:36.153253 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-9bb0-account-create-8rxjm" Dec 11 08:36:36 crc kubenswrapper[4881]: I1211 08:36:36.153813 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppb5c\" (UniqueName: \"kubernetes.io/projected/73794710-ff03-415b-b6f7-48e86190e910-kube-api-access-ppb5c\") pod \"keystone-ee83-account-create-7zk6g\" (UID: \"73794710-ff03-415b-b6f7-48e86190e910\") " pod="openstack/keystone-ee83-account-create-7zk6g" Dec 11 08:36:36 crc kubenswrapper[4881]: I1211 08:36:36.155947 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Dec 11 08:36:36 crc kubenswrapper[4881]: I1211 08:36:36.185771 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppb5c\" (UniqueName: \"kubernetes.io/projected/73794710-ff03-415b-b6f7-48e86190e910-kube-api-access-ppb5c\") pod \"keystone-ee83-account-create-7zk6g\" (UID: \"73794710-ff03-415b-b6f7-48e86190e910\") " pod="openstack/keystone-ee83-account-create-7zk6g" Dec 11 08:36:36 crc kubenswrapper[4881]: I1211 08:36:36.192574 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-ee83-account-create-7zk6g" Dec 11 08:36:36 crc kubenswrapper[4881]: I1211 08:36:36.196931 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-9bb0-account-create-8rxjm"] Dec 11 08:36:36 crc kubenswrapper[4881]: I1211 08:36:36.256060 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kj86\" (UniqueName: \"kubernetes.io/projected/3718a33b-862b-4600-82d4-b54c568ae5a4-kube-api-access-6kj86\") pod \"placement-9bb0-account-create-8rxjm\" (UID: \"3718a33b-862b-4600-82d4-b54c568ae5a4\") " pod="openstack/placement-9bb0-account-create-8rxjm" Dec 11 08:36:36 crc kubenswrapper[4881]: I1211 08:36:36.358461 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kj86\" (UniqueName: \"kubernetes.io/projected/3718a33b-862b-4600-82d4-b54c568ae5a4-kube-api-access-6kj86\") pod \"placement-9bb0-account-create-8rxjm\" (UID: \"3718a33b-862b-4600-82d4-b54c568ae5a4\") " pod="openstack/placement-9bb0-account-create-8rxjm" Dec 11 08:36:36 crc kubenswrapper[4881]: I1211 08:36:36.378518 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kj86\" (UniqueName: \"kubernetes.io/projected/3718a33b-862b-4600-82d4-b54c568ae5a4-kube-api-access-6kj86\") pod \"placement-9bb0-account-create-8rxjm\" (UID: \"3718a33b-862b-4600-82d4-b54c568ae5a4\") " pod="openstack/placement-9bb0-account-create-8rxjm" Dec 11 08:36:36 crc kubenswrapper[4881]: I1211 08:36:36.472144 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-9bb0-account-create-8rxjm" Dec 11 08:36:36 crc kubenswrapper[4881]: I1211 08:36:36.641701 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"7f0aa090-3aac-4da8-9efa-a31a7b3b130f","Type":"ContainerStarted","Data":"17c37ffa3fd1ce94ad82b5e77d494fa73e0d48a22e401ade2169a44ff80138be"} Dec 11 08:36:36 crc kubenswrapper[4881]: I1211 08:36:36.652807 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1af483c2-ea3f-45cd-971d-797c06f5c6e2","Type":"ContainerStarted","Data":"af9405aeb48e86260ea75046adc33a7e05845c79b08082eb62302c114abba94f"} Dec 11 08:36:36 crc kubenswrapper[4881]: I1211 08:36:36.677639 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=20.971988945 podStartE2EDuration="1m6.677615333s" podCreationTimestamp="2025-12-11 08:35:30 +0000 UTC" firstStartedPulling="2025-12-11 08:35:49.972561971 +0000 UTC m=+1198.349930668" lastFinishedPulling="2025-12-11 08:36:35.678188369 +0000 UTC m=+1244.055557056" observedRunningTime="2025-12-11 08:36:36.659181544 +0000 UTC m=+1245.036550241" watchObservedRunningTime="2025-12-11 08:36:36.677615333 +0000 UTC m=+1245.054984020" Dec 11 08:36:36 crc kubenswrapper[4881]: I1211 08:36:36.694084 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-ee83-account-create-7zk6g"] Dec 11 08:36:36 crc kubenswrapper[4881]: I1211 08:36:36.934996 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-9bb0-account-create-8rxjm"] Dec 11 08:36:36 crc kubenswrapper[4881]: W1211 08:36:36.963890 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3718a33b_862b_4600_82d4_b54c568ae5a4.slice/crio-3dfd8c1401c9f3eafb95e1123b37dc9756c95f0d12c505b8d1dcfca4aee354df WatchSource:0}: Error finding container 3dfd8c1401c9f3eafb95e1123b37dc9756c95f0d12c505b8d1dcfca4aee354df: Status 404 returned error can't find the container with id 3dfd8c1401c9f3eafb95e1123b37dc9756c95f0d12c505b8d1dcfca4aee354df Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.280453 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-0123-account-create-nxblk"] Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.281878 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-0123-account-create-nxblk" Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.284754 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.301634 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-0123-account-create-nxblk"] Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.387035 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjlqz\" (UniqueName: \"kubernetes.io/projected/c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3-kube-api-access-fjlqz\") pod \"glance-0123-account-create-nxblk\" (UID: \"c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3\") " pod="openstack/glance-0123-account-create-nxblk" Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.489393 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjlqz\" (UniqueName: \"kubernetes.io/projected/c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3-kube-api-access-fjlqz\") pod \"glance-0123-account-create-nxblk\" (UID: \"c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3\") " pod="openstack/glance-0123-account-create-nxblk" Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.509715 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjlqz\" (UniqueName: \"kubernetes.io/projected/c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3-kube-api-access-fjlqz\") pod \"glance-0123-account-create-nxblk\" (UID: \"c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3\") " pod="openstack/glance-0123-account-create-nxblk" Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.588521 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.603372 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-0123-account-create-nxblk" Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.668977 4881 generic.go:334] "Generic (PLEG): container finished" podID="73794710-ff03-415b-b6f7-48e86190e910" containerID="ff5228f418e12b75f5f60d84275c714f444f0a72f409069b532d1e643bfd3198" exitCode=0 Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.669133 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ee83-account-create-7zk6g" event={"ID":"73794710-ff03-415b-b6f7-48e86190e910","Type":"ContainerDied","Data":"ff5228f418e12b75f5f60d84275c714f444f0a72f409069b532d1e643bfd3198"} Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.669167 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ee83-account-create-7zk6g" event={"ID":"73794710-ff03-415b-b6f7-48e86190e910","Type":"ContainerStarted","Data":"83d9b72dfc10c16b4fe2a40b17e531bb277b688929920c2bfa6d143eff22efa0"} Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.680766 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1af483c2-ea3f-45cd-971d-797c06f5c6e2","Type":"ContainerStarted","Data":"acdc78083bc3564d65a4f1c43e3d1ffa3c5f3138c7f03e89875381fc0a5bff8b"} Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.680831 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1af483c2-ea3f-45cd-971d-797c06f5c6e2","Type":"ContainerStarted","Data":"77c324cd06ab7354f5c3ed0966c7d593b700aa0d3d86e8c2608eac86bfc48d57"} Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.680843 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1af483c2-ea3f-45cd-971d-797c06f5c6e2","Type":"ContainerStarted","Data":"8d628bd6bac457e66978e3462ae735865641a86f1e6fbc6af935f2df7af08bef"} Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.697306 4881 generic.go:334] "Generic (PLEG): container finished" podID="3718a33b-862b-4600-82d4-b54c568ae5a4" containerID="aff65a6ce6d13d872dc7208fe947c7953af82bd5d3ee0e0bf4ee364cd3ee0f0b" exitCode=0 Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.698375 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9bb0-account-create-8rxjm" event={"ID":"3718a33b-862b-4600-82d4-b54c568ae5a4","Type":"ContainerDied","Data":"aff65a6ce6d13d872dc7208fe947c7953af82bd5d3ee0e0bf4ee364cd3ee0f0b"} Dec 11 08:36:37 crc kubenswrapper[4881]: I1211 08:36:37.698402 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9bb0-account-create-8rxjm" event={"ID":"3718a33b-862b-4600-82d4-b54c568ae5a4","Type":"ContainerStarted","Data":"3dfd8c1401c9f3eafb95e1123b37dc9756c95f0d12c505b8d1dcfca4aee354df"} Dec 11 08:36:38 crc kubenswrapper[4881]: I1211 08:36:38.086797 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-0123-account-create-nxblk"] Dec 11 08:36:38 crc kubenswrapper[4881]: I1211 08:36:38.326562 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Dec 11 08:36:38 crc kubenswrapper[4881]: I1211 08:36:38.332102 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-b509-account-create-tcvfc"] Dec 11 08:36:38 crc kubenswrapper[4881]: I1211 08:36:38.333604 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-b509-account-create-tcvfc" Dec 11 08:36:38 crc kubenswrapper[4881]: I1211 08:36:38.335451 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-db-secret" Dec 11 08:36:38 crc kubenswrapper[4881]: I1211 08:36:38.343387 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-b509-account-create-tcvfc"] Dec 11 08:36:38 crc kubenswrapper[4881]: I1211 08:36:38.422393 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ctgr\" (UniqueName: \"kubernetes.io/projected/c0f8242c-de63-4ad0-9164-d941d7a1d67c-kube-api-access-6ctgr\") pod \"mysqld-exporter-b509-account-create-tcvfc\" (UID: \"c0f8242c-de63-4ad0-9164-d941d7a1d67c\") " pod="openstack/mysqld-exporter-b509-account-create-tcvfc" Dec 11 08:36:38 crc kubenswrapper[4881]: I1211 08:36:38.524436 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ctgr\" (UniqueName: \"kubernetes.io/projected/c0f8242c-de63-4ad0-9164-d941d7a1d67c-kube-api-access-6ctgr\") pod \"mysqld-exporter-b509-account-create-tcvfc\" (UID: \"c0f8242c-de63-4ad0-9164-d941d7a1d67c\") " pod="openstack/mysqld-exporter-b509-account-create-tcvfc" Dec 11 08:36:38 crc kubenswrapper[4881]: I1211 08:36:38.543037 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ctgr\" (UniqueName: \"kubernetes.io/projected/c0f8242c-de63-4ad0-9164-d941d7a1d67c-kube-api-access-6ctgr\") pod \"mysqld-exporter-b509-account-create-tcvfc\" (UID: \"c0f8242c-de63-4ad0-9164-d941d7a1d67c\") " pod="openstack/mysqld-exporter-b509-account-create-tcvfc" Dec 11 08:36:38 crc kubenswrapper[4881]: I1211 08:36:38.665139 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-b509-account-create-tcvfc" Dec 11 08:36:40 crc kubenswrapper[4881]: W1211 08:36:40.431989 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc528a5b2_04d9_4ec0_83a6_8fc7776fdcc3.slice/crio-8ff99118a5a6985ea42622f0e54066f87cb5721aa8b58585cfb9e1b1a89a8583 WatchSource:0}: Error finding container 8ff99118a5a6985ea42622f0e54066f87cb5721aa8b58585cfb9e1b1a89a8583: Status 404 returned error can't find the container with id 8ff99118a5a6985ea42622f0e54066f87cb5721aa8b58585cfb9e1b1a89a8583 Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.579623 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-ee83-account-create-7zk6g" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.590257 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-9bb0-account-create-8rxjm" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.646958 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.647748 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.676294 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6kj86\" (UniqueName: \"kubernetes.io/projected/3718a33b-862b-4600-82d4-b54c568ae5a4-kube-api-access-6kj86\") pod \"3718a33b-862b-4600-82d4-b54c568ae5a4\" (UID: \"3718a33b-862b-4600-82d4-b54c568ae5a4\") " Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.676560 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ppb5c\" (UniqueName: \"kubernetes.io/projected/73794710-ff03-415b-b6f7-48e86190e910-kube-api-access-ppb5c\") pod \"73794710-ff03-415b-b6f7-48e86190e910\" (UID: \"73794710-ff03-415b-b6f7-48e86190e910\") " Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.686411 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73794710-ff03-415b-b6f7-48e86190e910-kube-api-access-ppb5c" (OuterVolumeSpecName: "kube-api-access-ppb5c") pod "73794710-ff03-415b-b6f7-48e86190e910" (UID: "73794710-ff03-415b-b6f7-48e86190e910"). InnerVolumeSpecName "kube-api-access-ppb5c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.695524 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3718a33b-862b-4600-82d4-b54c568ae5a4-kube-api-access-6kj86" (OuterVolumeSpecName: "kube-api-access-6kj86") pod "3718a33b-862b-4600-82d4-b54c568ae5a4" (UID: "3718a33b-862b-4600-82d4-b54c568ae5a4"). InnerVolumeSpecName "kube-api-access-6kj86". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.699926 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.756494 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ee83-account-create-7zk6g" event={"ID":"73794710-ff03-415b-b6f7-48e86190e910","Type":"ContainerDied","Data":"83d9b72dfc10c16b4fe2a40b17e531bb277b688929920c2bfa6d143eff22efa0"} Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.756536 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83d9b72dfc10c16b4fe2a40b17e531bb277b688929920c2bfa6d143eff22efa0" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.756601 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-ee83-account-create-7zk6g" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.761316 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9bb0-account-create-8rxjm" event={"ID":"3718a33b-862b-4600-82d4-b54c568ae5a4","Type":"ContainerDied","Data":"3dfd8c1401c9f3eafb95e1123b37dc9756c95f0d12c505b8d1dcfca4aee354df"} Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.761365 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3dfd8c1401c9f3eafb95e1123b37dc9756c95f0d12c505b8d1dcfca4aee354df" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.761418 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-9bb0-account-create-8rxjm" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.762798 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0123-account-create-nxblk" event={"ID":"c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3","Type":"ContainerStarted","Data":"dbe9533fef09f890147f548b4721e46534fc44694f09bb4ac1991822c0e746e9"} Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.764145 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0123-account-create-nxblk" event={"ID":"c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3","Type":"ContainerStarted","Data":"8ff99118a5a6985ea42622f0e54066f87cb5721aa8b58585cfb9e1b1a89a8583"} Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.782843 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6kj86\" (UniqueName: \"kubernetes.io/projected/3718a33b-862b-4600-82d4-b54c568ae5a4-kube-api-access-6kj86\") on node \"crc\" DevicePath \"\"" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.782877 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ppb5c\" (UniqueName: \"kubernetes.io/projected/73794710-ff03-415b-b6f7-48e86190e910-kube-api-access-ppb5c\") on node \"crc\" DevicePath \"\"" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.787796 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-0123-account-create-nxblk" podStartSLOduration=3.7877795130000003 podStartE2EDuration="3.787779513s" podCreationTimestamp="2025-12-11 08:36:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:36:40.780460043 +0000 UTC m=+1249.157828770" watchObservedRunningTime="2025-12-11 08:36:40.787779513 +0000 UTC m=+1249.165148210" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.885450 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-b509-account-create-tcvfc"] Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.994113 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Dec 11 08:36:40 crc kubenswrapper[4881]: E1211 08:36:40.994874 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3718a33b-862b-4600-82d4-b54c568ae5a4" containerName="mariadb-account-create" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.994887 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="3718a33b-862b-4600-82d4-b54c568ae5a4" containerName="mariadb-account-create" Dec 11 08:36:40 crc kubenswrapper[4881]: E1211 08:36:40.994910 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73794710-ff03-415b-b6f7-48e86190e910" containerName="mariadb-account-create" Dec 11 08:36:40 
crc kubenswrapper[4881]: I1211 08:36:40.994917 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="73794710-ff03-415b-b6f7-48e86190e910" containerName="mariadb-account-create" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.995110 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="3718a33b-862b-4600-82d4-b54c568ae5a4" containerName="mariadb-account-create" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.995124 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="73794710-ff03-415b-b6f7-48e86190e910" containerName="mariadb-account-create" Dec 11 08:36:40 crc kubenswrapper[4881]: I1211 08:36:40.996167 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.002756 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.003004 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.003012 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-7vlk8" Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.003545 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.026867 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.089874 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac1565ce-1297-4689-b199-d88c339feb68-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0" Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.089937 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pjc6\" (UniqueName: \"kubernetes.io/projected/ac1565ce-1297-4689-b199-d88c339feb68-kube-api-access-7pjc6\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0" Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.089956 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ac1565ce-1297-4689-b199-d88c339feb68-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0" Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.089993 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac1565ce-1297-4689-b199-d88c339feb68-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0" Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.090030 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac1565ce-1297-4689-b199-d88c339feb68-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0" Dec 11 08:36:41 crc kubenswrapper[4881]: 
I1211 08:36:41.090047 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ac1565ce-1297-4689-b199-d88c339feb68-scripts\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0"
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.090111 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac1565ce-1297-4689-b199-d88c339feb68-config\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0"
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.191649 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pjc6\" (UniqueName: \"kubernetes.io/projected/ac1565ce-1297-4689-b199-d88c339feb68-kube-api-access-7pjc6\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0"
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.191738 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ac1565ce-1297-4689-b199-d88c339feb68-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0"
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.191787 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac1565ce-1297-4689-b199-d88c339feb68-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0"
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.191826 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac1565ce-1297-4689-b199-d88c339feb68-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0"
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.191843 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ac1565ce-1297-4689-b199-d88c339feb68-scripts\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0"
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.191909 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac1565ce-1297-4689-b199-d88c339feb68-config\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0"
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.191991 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac1565ce-1297-4689-b199-d88c339feb68-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0"
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.192907 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ac1565ce-1297-4689-b199-d88c339feb68-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0"
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.193247 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ac1565ce-1297-4689-b199-d88c339feb68-scripts\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0"
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.193665 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac1565ce-1297-4689-b199-d88c339feb68-config\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0"
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.196119 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac1565ce-1297-4689-b199-d88c339feb68-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0"
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.196929 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac1565ce-1297-4689-b199-d88c339feb68-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0"
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.199310 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac1565ce-1297-4689-b199-d88c339feb68-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0"
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.216224 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pjc6\" (UniqueName: \"kubernetes.io/projected/ac1565ce-1297-4689-b199-d88c339feb68-kube-api-access-7pjc6\") pod \"ovn-northd-0\" (UID: \"ac1565ce-1297-4689-b199-d88c339feb68\") " pod="openstack/ovn-northd-0"
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.319106 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.783594 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1206112a-8438-4dcf-9cad-a3e38790a344","Type":"ContainerStarted","Data":"ba6e0d3779b0e6c9e61ddaa181461d7ea87ee57213fe6efacda7acba5a4d1228"}
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.786412 4881 generic.go:334] "Generic (PLEG): container finished" podID="c0f8242c-de63-4ad0-9164-d941d7a1d67c" containerID="a65c0f2809c22b9782bc496952fd63040f08164a35d37c3c1a3f88b56392d6b0" exitCode=0
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.786715 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-b509-account-create-tcvfc" event={"ID":"c0f8242c-de63-4ad0-9164-d941d7a1d67c","Type":"ContainerDied","Data":"a65c0f2809c22b9782bc496952fd63040f08164a35d37c3c1a3f88b56392d6b0"}
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.786760 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-b509-account-create-tcvfc" event={"ID":"c0f8242c-de63-4ad0-9164-d941d7a1d67c","Type":"ContainerStarted","Data":"0e3738185101a31ab0cfa7a0adffe0cc3c4a7b297b67bd8178c585f74ce4e629"}
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.806935 4881 generic.go:334] "Generic (PLEG): container finished" podID="c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3" containerID="dbe9533fef09f890147f548b4721e46534fc44694f09bb4ac1991822c0e746e9" exitCode=0
Dec 11 08:36:41 crc kubenswrapper[4881]: I1211 08:36:41.807167 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0123-account-create-nxblk" event={"ID":"c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3","Type":"ContainerDied","Data":"dbe9533fef09f890147f548b4721e46534fc44694f09bb4ac1991822c0e746e9"}
Dec 11 08:36:42 crc kubenswrapper[4881]: I1211 08:36:42.005437 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Dec 11 08:36:42 crc kubenswrapper[4881]: I1211 08:36:42.644143 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Dec 11 08:36:42 crc kubenswrapper[4881]: I1211 08:36:42.865013 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1af483c2-ea3f-45cd-971d-797c06f5c6e2","Type":"ContainerStarted","Data":"a9aff31a6a6517a170e0bdf48190ec9f361ef24c0c38ba21ccce0dd2452353a5"}
Dec 11 08:36:42 crc kubenswrapper[4881]: I1211 08:36:42.865374 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1af483c2-ea3f-45cd-971d-797c06f5c6e2","Type":"ContainerStarted","Data":"fcea0974c153022ea2bd03bd7ffcd8364736806ae87c065918b293eae572a2a4"}
Dec 11 08:36:42 crc kubenswrapper[4881]: I1211 08:36:42.865393 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1af483c2-ea3f-45cd-971d-797c06f5c6e2","Type":"ContainerStarted","Data":"0ce1c24fed625452b44880b9882f52f311fbb4802ab9166af35f61323cefedc7"}
Dec 11 08:36:42 crc kubenswrapper[4881]: I1211 08:36:42.865405 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1af483c2-ea3f-45cd-971d-797c06f5c6e2","Type":"ContainerStarted","Data":"f896efe3d86fc376d14f8485e641ed33eb6ce080ab0fe5edb16ca2abb6d042eb"}
Dec 11 08:36:42 crc kubenswrapper[4881]: I1211 08:36:42.865416 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1af483c2-ea3f-45cd-971d-797c06f5c6e2","Type":"ContainerStarted","Data":"cd7e971d0909aa19b946767f5b9773f8292e22791d0c6d1f568b025b178ab79e"}
Dec 11 08:36:42 crc kubenswrapper[4881]: I1211 08:36:42.866985 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"ac1565ce-1297-4689-b199-d88c339feb68","Type":"ContainerStarted","Data":"53327f3fe516bf85ea7327f014d77f89554c506b74596a21d049d14065239ad8"}
Dec 11 08:36:42 crc kubenswrapper[4881]: I1211 08:36:42.938503 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.038153 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-8z5np"]
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.041198 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-8z5np"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.044424 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-8z5np"]
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.128838 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-ql9dq"]
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.130395 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-ql9dq"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.146491 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2cvb\" (UniqueName: \"kubernetes.io/projected/2a84e1c9-f761-496c-aae3-6ec403f26898-kube-api-access-q2cvb\") pod \"barbican-db-create-ql9dq\" (UID: \"2a84e1c9-f761-496c-aae3-6ec403f26898\") " pod="openstack/barbican-db-create-ql9dq"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.146651 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfppv\" (UniqueName: \"kubernetes.io/projected/f3b36526-9590-49d0-9b9a-0b86736538bb-kube-api-access-rfppv\") pod \"cinder-db-create-8z5np\" (UID: \"f3b36526-9590-49d0-9b9a-0b86736538bb\") " pod="openstack/cinder-db-create-8z5np"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.155008 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-ql9dq"]
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.248177 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2cvb\" (UniqueName: \"kubernetes.io/projected/2a84e1c9-f761-496c-aae3-6ec403f26898-kube-api-access-q2cvb\") pod \"barbican-db-create-ql9dq\" (UID: \"2a84e1c9-f761-496c-aae3-6ec403f26898\") " pod="openstack/barbican-db-create-ql9dq"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.248358 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfppv\" (UniqueName: \"kubernetes.io/projected/f3b36526-9590-49d0-9b9a-0b86736538bb-kube-api-access-rfppv\") pod \"cinder-db-create-8z5np\" (UID: \"f3b36526-9590-49d0-9b9a-0b86736538bb\") " pod="openstack/cinder-db-create-8z5np"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.307933 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-xznx2"]
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.311073 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfppv\" (UniqueName: \"kubernetes.io/projected/f3b36526-9590-49d0-9b9a-0b86736538bb-kube-api-access-rfppv\") pod \"cinder-db-create-8z5np\" (UID: \"f3b36526-9590-49d0-9b9a-0b86736538bb\") " pod="openstack/cinder-db-create-8z5np"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.311222 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2cvb\" (UniqueName: \"kubernetes.io/projected/2a84e1c9-f761-496c-aae3-6ec403f26898-kube-api-access-q2cvb\") pod \"barbican-db-create-ql9dq\" (UID: \"2a84e1c9-f761-496c-aae3-6ec403f26898\") " pod="openstack/barbican-db-create-ql9dq"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.314269 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-xznx2"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.334110 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-xznx2"]
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.350350 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-478mv\" (UniqueName: \"kubernetes.io/projected/30f00215-2531-44c8-80d6-7f3be540e71b-kube-api-access-478mv\") pod \"heat-db-create-xznx2\" (UID: \"30f00215-2531-44c8-80d6-7f3be540e71b\") " pod="openstack/heat-db-create-xznx2"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.417202 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-8z5np"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.453371 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-478mv\" (UniqueName: \"kubernetes.io/projected/30f00215-2531-44c8-80d6-7f3be540e71b-kube-api-access-478mv\") pod \"heat-db-create-xznx2\" (UID: \"30f00215-2531-44c8-80d6-7f3be540e71b\") " pod="openstack/heat-db-create-xznx2"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.480848 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-478mv\" (UniqueName: \"kubernetes.io/projected/30f00215-2531-44c8-80d6-7f3be540e71b-kube-api-access-478mv\") pod \"heat-db-create-xznx2\" (UID: \"30f00215-2531-44c8-80d6-7f3be540e71b\") " pod="openstack/heat-db-create-xznx2"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.505871 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-vwbg2"]
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.517998 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-vwbg2"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.519168 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-vwbg2"]
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.555300 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zws8\" (UniqueName: \"kubernetes.io/projected/5e6b6074-f624-4016-beb9-a926af51e986-kube-api-access-9zws8\") pod \"neutron-db-create-vwbg2\" (UID: \"5e6b6074-f624-4016-beb9-a926af51e986\") " pod="openstack/neutron-db-create-vwbg2"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.656640 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zws8\" (UniqueName: \"kubernetes.io/projected/5e6b6074-f624-4016-beb9-a926af51e986-kube-api-access-9zws8\") pod \"neutron-db-create-vwbg2\" (UID: \"5e6b6074-f624-4016-beb9-a926af51e986\") " pod="openstack/neutron-db-create-vwbg2"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.676764 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zws8\" (UniqueName: \"kubernetes.io/projected/5e6b6074-f624-4016-beb9-a926af51e986-kube-api-access-9zws8\") pod \"neutron-db-create-vwbg2\" (UID: \"5e6b6074-f624-4016-beb9-a926af51e986\") " pod="openstack/neutron-db-create-vwbg2"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.807923 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-ql9dq"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.816537 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-xznx2"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.822964 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-b509-account-create-tcvfc"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.831580 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-0123-account-create-nxblk"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.875354 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ctgr\" (UniqueName: \"kubernetes.io/projected/c0f8242c-de63-4ad0-9164-d941d7a1d67c-kube-api-access-6ctgr\") pod \"c0f8242c-de63-4ad0-9164-d941d7a1d67c\" (UID: \"c0f8242c-de63-4ad0-9164-d941d7a1d67c\") "
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.877177 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjlqz\" (UniqueName: \"kubernetes.io/projected/c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3-kube-api-access-fjlqz\") pod \"c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3\" (UID: \"c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3\") "
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.881052 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3-kube-api-access-fjlqz" (OuterVolumeSpecName: "kube-api-access-fjlqz") pod "c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3" (UID: "c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3"). InnerVolumeSpecName "kube-api-access-fjlqz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.885069 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0f8242c-de63-4ad0-9164-d941d7a1d67c-kube-api-access-6ctgr" (OuterVolumeSpecName: "kube-api-access-6ctgr") pod "c0f8242c-de63-4ad0-9164-d941d7a1d67c" (UID: "c0f8242c-de63-4ad0-9164-d941d7a1d67c"). InnerVolumeSpecName "kube-api-access-6ctgr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.899685 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0123-account-create-nxblk" event={"ID":"c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3","Type":"ContainerDied","Data":"8ff99118a5a6985ea42622f0e54066f87cb5721aa8b58585cfb9e1b1a89a8583"}
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.899742 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ff99118a5a6985ea42622f0e54066f87cb5721aa8b58585cfb9e1b1a89a8583"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.899805 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-0123-account-create-nxblk"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.915622 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-b509-account-create-tcvfc" event={"ID":"c0f8242c-de63-4ad0-9164-d941d7a1d67c","Type":"ContainerDied","Data":"0e3738185101a31ab0cfa7a0adffe0cc3c4a7b297b67bd8178c585f74ce4e629"}
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.915656 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e3738185101a31ab0cfa7a0adffe0cc3c4a7b297b67bd8178c585f74ce4e629"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.915707 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-b509-account-create-tcvfc"
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.944100 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-8z5np"]
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.985471 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjlqz\" (UniqueName: \"kubernetes.io/projected/c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3-kube-api-access-fjlqz\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:43 crc kubenswrapper[4881]: I1211 08:36:43.986607 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ctgr\" (UniqueName: \"kubernetes.io/projected/c0f8242c-de63-4ad0-9164-d941d7a1d67c-kube-api-access-6ctgr\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:44 crc kubenswrapper[4881]: I1211 08:36:44.072732 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-vwbg2"
Dec 11 08:36:44 crc kubenswrapper[4881]: I1211 08:36:44.390692 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-ql9dq"]
Dec 11 08:36:44 crc kubenswrapper[4881]: I1211 08:36:44.547140 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-xznx2"]
Dec 11 08:36:44 crc kubenswrapper[4881]: W1211 08:36:44.562216 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod30f00215_2531_44c8_80d6_7f3be540e71b.slice/crio-8702b9d30d9feafacd7099e9ee04312ccea06e63bcbf67610dd4b1cf28241755 WatchSource:0}: Error finding container 8702b9d30d9feafacd7099e9ee04312ccea06e63bcbf67610dd4b1cf28241755: Status 404 returned error can't find the container with id 8702b9d30d9feafacd7099e9ee04312ccea06e63bcbf67610dd4b1cf28241755
Dec 11 08:36:44 crc kubenswrapper[4881]: W1211 08:36:44.697407 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5e6b6074_f624_4016_beb9_a926af51e986.slice/crio-75e83de236deeb0a3cf9a54fc43936d695fe9dda85fd4fdfbb8de4c33a9d05da WatchSource:0}: Error finding container 75e83de236deeb0a3cf9a54fc43936d695fe9dda85fd4fdfbb8de4c33a9d05da: Status 404 returned error can't find the container with id 75e83de236deeb0a3cf9a54fc43936d695fe9dda85fd4fdfbb8de4c33a9d05da
Dec 11 08:36:44 crc kubenswrapper[4881]: I1211 08:36:44.700169 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-vwbg2"]
Dec 11 08:36:44 crc kubenswrapper[4881]: I1211 08:36:44.924792 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-vwbg2" event={"ID":"5e6b6074-f624-4016-beb9-a926af51e986","Type":"ContainerStarted","Data":"75e83de236deeb0a3cf9a54fc43936d695fe9dda85fd4fdfbb8de4c33a9d05da"}
Dec 11 08:36:44 crc kubenswrapper[4881]: I1211 08:36:44.927039 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-8z5np" event={"ID":"f3b36526-9590-49d0-9b9a-0b86736538bb","Type":"ContainerStarted","Data":"52b26e45e66a5641270eb871b48b976adee1fc1524b33e724e6495635864a740"}
Dec 11 08:36:44 crc kubenswrapper[4881]: I1211 08:36:44.928278 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-ql9dq" event={"ID":"2a84e1c9-f761-496c-aae3-6ec403f26898","Type":"ContainerStarted","Data":"221b17d11f120b547eee0593ec3b0d474a32b23971b70c144cd86992cdb49c1f"}
Dec 11 08:36:44 crc kubenswrapper[4881]: I1211 08:36:44.929595 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-xznx2" event={"ID":"30f00215-2531-44c8-80d6-7f3be540e71b","Type":"ContainerStarted","Data":"8702b9d30d9feafacd7099e9ee04312ccea06e63bcbf67610dd4b1cf28241755"}
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.570328 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-s75l2"]
Dec 11 08:36:46 crc kubenswrapper[4881]: E1211 08:36:46.571096 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0f8242c-de63-4ad0-9164-d941d7a1d67c" containerName="mariadb-account-create"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.571114 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0f8242c-de63-4ad0-9164-d941d7a1d67c" containerName="mariadb-account-create"
Dec 11 08:36:46 crc kubenswrapper[4881]: E1211 08:36:46.571150 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3" containerName="mariadb-account-create"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.571158 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3" containerName="mariadb-account-create"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.571500 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0f8242c-de63-4ad0-9164-d941d7a1d67c" containerName="mariadb-account-create"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.571537 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3" containerName="mariadb-account-create"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.572324 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-s75l2"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.575034 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.575721 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.579870 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-9vkph"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.580003 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.591475 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-s75l2"]
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.647039 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-config-data\") pod \"keystone-db-sync-s75l2\" (UID: \"3f4a6ba9-97eb-425b-b2c1-54245c81a6df\") " pod="openstack/keystone-db-sync-s75l2"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.647164 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wh44\" (UniqueName: \"kubernetes.io/projected/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-kube-api-access-7wh44\") pod \"keystone-db-sync-s75l2\" (UID: \"3f4a6ba9-97eb-425b-b2c1-54245c81a6df\") " pod="openstack/keystone-db-sync-s75l2"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.647204 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-combined-ca-bundle\") pod \"keystone-db-sync-s75l2\" (UID: \"3f4a6ba9-97eb-425b-b2c1-54245c81a6df\") " pod="openstack/keystone-db-sync-s75l2"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.748518 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-config-data\") pod \"keystone-db-sync-s75l2\" (UID: \"3f4a6ba9-97eb-425b-b2c1-54245c81a6df\") " pod="openstack/keystone-db-sync-s75l2"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.748643 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wh44\" (UniqueName: \"kubernetes.io/projected/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-kube-api-access-7wh44\") pod \"keystone-db-sync-s75l2\" (UID: \"3f4a6ba9-97eb-425b-b2c1-54245c81a6df\") " pod="openstack/keystone-db-sync-s75l2"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.748682 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-combined-ca-bundle\") pod \"keystone-db-sync-s75l2\" (UID: \"3f4a6ba9-97eb-425b-b2c1-54245c81a6df\") " pod="openstack/keystone-db-sync-s75l2"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.755531 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-combined-ca-bundle\") pod \"keystone-db-sync-s75l2\" (UID: \"3f4a6ba9-97eb-425b-b2c1-54245c81a6df\") " pod="openstack/keystone-db-sync-s75l2"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.756324 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-config-data\") pod \"keystone-db-sync-s75l2\" (UID: \"3f4a6ba9-97eb-425b-b2c1-54245c81a6df\") " pod="openstack/keystone-db-sync-s75l2"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.769264 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wh44\" (UniqueName: \"kubernetes.io/projected/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-kube-api-access-7wh44\") pod \"keystone-db-sync-s75l2\" (UID: \"3f4a6ba9-97eb-425b-b2c1-54245c81a6df\") " pod="openstack/keystone-db-sync-s75l2"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.940522 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-s75l2"
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.949998 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1206112a-8438-4dcf-9cad-a3e38790a344","Type":"ContainerStarted","Data":"7bb4b7a05a63591d49e144e279752306ea71b1cb98919777df69c105fc734a0e"}
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.955540 4881 generic.go:334] "Generic (PLEG): container finished" podID="2a84e1c9-f761-496c-aae3-6ec403f26898" containerID="134f45a5d8d70f6de6ef94067aa29106fdd31590a99d05a5028dabde451fac93" exitCode=0
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.955618 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-ql9dq" event={"ID":"2a84e1c9-f761-496c-aae3-6ec403f26898","Type":"ContainerDied","Data":"134f45a5d8d70f6de6ef94067aa29106fdd31590a99d05a5028dabde451fac93"}
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.962703 4881 generic.go:334] "Generic (PLEG): container finished" podID="30f00215-2531-44c8-80d6-7f3be540e71b" containerID="85965898509bda4c29184c354981309c5fb29a2ec7fe7fc941f70a6101d2fb54" exitCode=0
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.962788 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-xznx2" event={"ID":"30f00215-2531-44c8-80d6-7f3be540e71b","Type":"ContainerDied","Data":"85965898509bda4c29184c354981309c5fb29a2ec7fe7fc941f70a6101d2fb54"}
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.977468 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1af483c2-ea3f-45cd-971d-797c06f5c6e2","Type":"ContainerStarted","Data":"5595422816c5975819fd06476fdd7254e62f7c28bd9901251cdcb729e6d7bd0b"}
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.977515 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1af483c2-ea3f-45cd-971d-797c06f5c6e2","Type":"ContainerStarted","Data":"9f7889974cc1dec7e21a2e93eb92d3e3e13d6fc1d83bd95fafa15409cb3df595"}
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.988670 4881 generic.go:334] "Generic (PLEG): container finished" podID="5e6b6074-f624-4016-beb9-a926af51e986" containerID="df00438a9f6b8c3f78ff888c7863b36178ddaeee4c61b4bcecf16d5bf631cb15" exitCode=0
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.988809 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-vwbg2" event={"ID":"5e6b6074-f624-4016-beb9-a926af51e986","Type":"ContainerDied","Data":"df00438a9f6b8c3f78ff888c7863b36178ddaeee4c61b4bcecf16d5bf631cb15"}
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.996058 4881 generic.go:334] "Generic (PLEG): container finished" podID="f3b36526-9590-49d0-9b9a-0b86736538bb" containerID="481a4016e07783be6ea1d0c3f9de05be655d98b6eccea5c06b271668fd81476f" exitCode=0
Dec 11 08:36:46 crc kubenswrapper[4881]: I1211 08:36:46.996118 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-8z5np" event={"ID":"f3b36526-9590-49d0-9b9a-0b86736538bb","Type":"ContainerDied","Data":"481a4016e07783be6ea1d0c3f9de05be655d98b6eccea5c06b271668fd81476f"}
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.020629 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=39.795943668 podStartE2EDuration="49.02060877s" podCreationTimestamp="2025-12-11 08:35:58 +0000 UTC" firstStartedPulling="2025-12-11 08:36:32.334222699 +0000 UTC m=+1240.711591396" lastFinishedPulling="2025-12-11 08:36:41.558887801 +0000 UTC m=+1249.936256498" observedRunningTime="2025-12-11 08:36:47.017957691 +0000 UTC m=+1255.395326408" watchObservedRunningTime="2025-12-11 08:36:47.02060877 +0000 UTC m=+1255.397977487"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.428077 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-s775q"]
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.430622 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.432829 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.462544 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-s775q"]
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.474826 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-lx525"]
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.476945 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lx525"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.482784 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-w497b"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.482969 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.483934 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-config\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.494041 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.494145 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6269s\" (UniqueName: \"kubernetes.io/projected/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-kube-api-access-6269s\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.494199 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.494254 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.494534 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.503530 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-lx525"]
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.532554 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-s75l2"]
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.596891 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-combined-ca-bundle\") pod \"glance-db-sync-lx525\" (UID: \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\") " pod="openstack/glance-db-sync-lx525"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.597029 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.597124 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4k68\" (UniqueName: \"kubernetes.io/projected/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-kube-api-access-k4k68\") pod \"glance-db-sync-lx525\" (UID: \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\") " pod="openstack/glance-db-sync-lx525"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.597176 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-config\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.597247 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-config-data\") pod \"glance-db-sync-lx525\" (UID: \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\") " pod="openstack/glance-db-sync-lx525"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.597281 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.597312 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6269s\" (UniqueName: \"kubernetes.io/projected/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-kube-api-access-6269s\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.597405 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-db-sync-config-data\") pod \"glance-db-sync-lx525\" (UID: \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\") " pod="openstack/glance-db-sync-lx525"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.597444 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.597478 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.598151 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-config\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.598887 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.598904 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.599488 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.600035 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.619683 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6269s\" (UniqueName: \"kubernetes.io/projected/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-kube-api-access-6269s\") pod \"dnsmasq-dns-77585f5f8c-s775q\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.698999 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-db-sync-config-data\") pod \"glance-db-sync-lx525\" (UID: \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\") " pod="openstack/glance-db-sync-lx525"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.699100 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-combined-ca-bundle\") pod \"glance-db-sync-lx525\" (UID: \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\") " pod="openstack/glance-db-sync-lx525"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.699213 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4k68\" (UniqueName: \"kubernetes.io/projected/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-kube-api-access-k4k68\") pod \"glance-db-sync-lx525\" (UID: \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\") " pod="openstack/glance-db-sync-lx525"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.699298 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-config-data\") pod \"glance-db-sync-lx525\" (UID: \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\") " pod="openstack/glance-db-sync-lx525"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.702952 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-combined-ca-bundle\") pod \"glance-db-sync-lx525\" (UID: \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\") " pod="openstack/glance-db-sync-lx525"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.704139 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-config-data\") pod \"glance-db-sync-lx525\" (UID: \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\") " pod="openstack/glance-db-sync-lx525"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.707615 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-db-sync-config-data\") pod \"glance-db-sync-lx525\" (UID: \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\") " pod="openstack/glance-db-sync-lx525"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.716295 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4k68\" (UniqueName: \"kubernetes.io/projected/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-kube-api-access-k4k68\") pod \"glance-db-sync-lx525\" (UID: \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\") " pod="openstack/glance-db-sync-lx525"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.764347 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-s775q"
Dec 11 08:36:47 crc kubenswrapper[4881]: I1211 08:36:47.806071 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lx525"
Dec 11 08:36:48 crc kubenswrapper[4881]: I1211 08:36:48.013526 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"ac1565ce-1297-4689-b199-d88c339feb68","Type":"ContainerStarted","Data":"347886b9b2fac12275b690e3f9d07193fec1b7f1ffc652cd2cba5003458eee74"}
Dec 11 08:36:48 crc kubenswrapper[4881]: I1211 08:36:48.013877 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"ac1565ce-1297-4689-b199-d88c339feb68","Type":"ContainerStarted","Data":"ae879dcd6c62e0509daf39f6b783f092059f43300040f3dbe2d187e0c87b9ed4"}
Dec 11 08:36:48 crc kubenswrapper[4881]: I1211 08:36:48.014083 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Dec 11 08:36:48 crc kubenswrapper[4881]: I1211 08:36:48.017937 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-s75l2" event={"ID":"3f4a6ba9-97eb-425b-b2c1-54245c81a6df","Type":"ContainerStarted","Data":"e6df2d20d903b5d776bf0cbfcbf505ff43d8953c047cab3f2b19ba8faf458b1b"}
Dec 11 08:36:48 crc kubenswrapper[4881]: I1211 08:36:48.036885 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.227216566 podStartE2EDuration="8.036863992s" podCreationTimestamp="2025-12-11 08:36:40 +0000 UTC" firstStartedPulling="2025-12-11 08:36:42.020446941 +0000 UTC m=+1250.397815638" lastFinishedPulling="2025-12-11 08:36:46.830094367 +0000 UTC m=+1255.207463064" observedRunningTime="2025-12-11 08:36:48.034756887 +0000 UTC m=+1256.412125584" watchObservedRunningTime="2025-12-11 08:36:48.036863992 +0000 UTC m=+1256.414232689"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:48.729262 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-8mgxc"]
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:48.731237 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-8mgxc"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:48.740415 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-8mgxc"]
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:48.865887 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lxjp\" (UniqueName: \"kubernetes.io/projected/cae2b44c-241b-44ec-b9c5-1cbdddf26008-kube-api-access-7lxjp\") pod \"mysqld-exporter-openstack-cell1-db-create-8mgxc\" (UID: \"cae2b44c-241b-44ec-b9c5-1cbdddf26008\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-8mgxc"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.349111 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lxjp\" (UniqueName: \"kubernetes.io/projected/cae2b44c-241b-44ec-b9c5-1cbdddf26008-kube-api-access-7lxjp\") pod \"mysqld-exporter-openstack-cell1-db-create-8mgxc\" (UID: \"cae2b44c-241b-44ec-b9c5-1cbdddf26008\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-8mgxc"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.470866 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-5gqh8"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.471197 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-5gqh8"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.473990 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lxjp\" (UniqueName: \"kubernetes.io/projected/cae2b44c-241b-44ec-b9c5-1cbdddf26008-kube-api-access-7lxjp\") pod \"mysqld-exporter-openstack-cell1-db-create-8mgxc\" (UID: \"cae2b44c-241b-44ec-b9c5-1cbdddf26008\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-8mgxc"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.654118 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-8mgxc"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.761238 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-xfltd-config-vggpl"]
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.763222 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.771686 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.778646 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-xfltd-config-vggpl"]
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.881083 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-additional-scripts\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.881150 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-log-ovn\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.881186 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5v8k\" (UniqueName: \"kubernetes.io/projected/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-kube-api-access-v5v8k\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.881254 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-scripts\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.881287 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-run\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.881308 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-run-ovn\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.958922 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-s775q"]
Dec 11 08:36:49 crc kubenswrapper[4881]: W1211 08:36:49.979518 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2793ce53_41a4_4170_998f_b4ddcd0dbcaa.slice/crio-4652d228ca21149e0e0d79e99f48007085421a032e692e98623ee97e8b0e0978 WatchSource:0}: Error finding container 4652d228ca21149e0e0d79e99f48007085421a032e692e98623ee97e8b0e0978: Status 404 returned error can't find the container with id 4652d228ca21149e0e0d79e99f48007085421a032e692e98623ee97e8b0e0978
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.983239 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-scripts\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.983288 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-run\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.983312 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-run-ovn\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.983407 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-additional-scripts\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.983451 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-log-ovn\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.983481 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5v8k\" (UniqueName: \"kubernetes.io/projected/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-kube-api-access-v5v8k\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.984859 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-run-ovn\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.984953 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-run\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.984990 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-log-ovn\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.985747 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-additional-scripts\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:49 crc kubenswrapper[4881]: I1211 08:36:49.986050 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-scripts\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.011738 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5v8k\" (UniqueName: \"kubernetes.io/projected/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-kube-api-access-v5v8k\") pod \"ovn-controller-xfltd-config-vggpl\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.015050 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-vwbg2"
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.018449 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-xznx2"
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.040479 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-8z5np"
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.070617 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-ql9dq"
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.070751 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-vwbg2"
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.070742 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-vwbg2" event={"ID":"5e6b6074-f624-4016-beb9-a926af51e986","Type":"ContainerDied","Data":"75e83de236deeb0a3cf9a54fc43936d695fe9dda85fd4fdfbb8de4c33a9d05da"}
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.070790 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75e83de236deeb0a3cf9a54fc43936d695fe9dda85fd4fdfbb8de4c33a9d05da"
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.073692 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-8z5np"
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.073674 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-8z5np" event={"ID":"f3b36526-9590-49d0-9b9a-0b86736538bb","Type":"ContainerDied","Data":"52b26e45e66a5641270eb871b48b976adee1fc1524b33e724e6495635864a740"}
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.074217 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="52b26e45e66a5641270eb871b48b976adee1fc1524b33e724e6495635864a740"
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.076203 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-s775q" event={"ID":"2793ce53-41a4-4170-998f-b4ddcd0dbcaa","Type":"ContainerStarted","Data":"4652d228ca21149e0e0d79e99f48007085421a032e692e98623ee97e8b0e0978"}
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.077985 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-ql9dq"
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.078034 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-ql9dq" event={"ID":"2a84e1c9-f761-496c-aae3-6ec403f26898","Type":"ContainerDied","Data":"221b17d11f120b547eee0593ec3b0d474a32b23971b70c144cd86992cdb49c1f"}
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.078069 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="221b17d11f120b547eee0593ec3b0d474a32b23971b70c144cd86992cdb49c1f"
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.082117 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-xznx2"
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.082308 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-xznx2" event={"ID":"30f00215-2531-44c8-80d6-7f3be540e71b","Type":"ContainerDied","Data":"8702b9d30d9feafacd7099e9ee04312ccea06e63bcbf67610dd4b1cf28241755"}
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.082363 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8702b9d30d9feafacd7099e9ee04312ccea06e63bcbf67610dd4b1cf28241755"
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.084984 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zws8\" (UniqueName: \"kubernetes.io/projected/5e6b6074-f624-4016-beb9-a926af51e986-kube-api-access-9zws8\") pod \"5e6b6074-f624-4016-beb9-a926af51e986\" (UID: \"5e6b6074-f624-4016-beb9-a926af51e986\") "
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.085255 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rfppv\" (UniqueName: \"kubernetes.io/projected/f3b36526-9590-49d0-9b9a-0b86736538bb-kube-api-access-rfppv\") pod \"f3b36526-9590-49d0-9b9a-0b86736538bb\" (UID: \"f3b36526-9590-49d0-9b9a-0b86736538bb\") "
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.085437 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-478mv\" (UniqueName: \"kubernetes.io/projected/30f00215-2531-44c8-80d6-7f3be540e71b-kube-api-access-478mv\") pod \"30f00215-2531-44c8-80d6-7f3be540e71b\" (UID: \"30f00215-2531-44c8-80d6-7f3be540e71b\") "
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.087220 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-lx525"]
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.090681 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30f00215-2531-44c8-80d6-7f3be540e71b-kube-api-access-478mv" (OuterVolumeSpecName: "kube-api-access-478mv") pod "30f00215-2531-44c8-80d6-7f3be540e71b" (UID: "30f00215-2531-44c8-80d6-7f3be540e71b"). InnerVolumeSpecName "kube-api-access-478mv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.090724 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e6b6074-f624-4016-beb9-a926af51e986-kube-api-access-9zws8" (OuterVolumeSpecName: "kube-api-access-9zws8") pod "5e6b6074-f624-4016-beb9-a926af51e986" (UID: "5e6b6074-f624-4016-beb9-a926af51e986"). InnerVolumeSpecName "kube-api-access-9zws8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.094785 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3b36526-9590-49d0-9b9a-0b86736538bb-kube-api-access-rfppv" (OuterVolumeSpecName: "kube-api-access-rfppv") pod "f3b36526-9590-49d0-9b9a-0b86736538bb" (UID: "f3b36526-9590-49d0-9b9a-0b86736538bb"). InnerVolumeSpecName "kube-api-access-rfppv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.187696 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2cvb\" (UniqueName: \"kubernetes.io/projected/2a84e1c9-f761-496c-aae3-6ec403f26898-kube-api-access-q2cvb\") pod \"2a84e1c9-f761-496c-aae3-6ec403f26898\" (UID: \"2a84e1c9-f761-496c-aae3-6ec403f26898\") "
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.188822 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rfppv\" (UniqueName: \"kubernetes.io/projected/f3b36526-9590-49d0-9b9a-0b86736538bb-kube-api-access-rfppv\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.188847 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-478mv\" (UniqueName: \"kubernetes.io/projected/30f00215-2531-44c8-80d6-7f3be540e71b-kube-api-access-478mv\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.188860 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zws8\" (UniqueName: \"kubernetes.io/projected/5e6b6074-f624-4016-beb9-a926af51e986-kube-api-access-9zws8\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.197888 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a84e1c9-f761-496c-aae3-6ec403f26898-kube-api-access-q2cvb" (OuterVolumeSpecName: "kube-api-access-q2cvb") pod "2a84e1c9-f761-496c-aae3-6ec403f26898" (UID: "2a84e1c9-f761-496c-aae3-6ec403f26898"). InnerVolumeSpecName "kube-api-access-q2cvb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.290918 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2cvb\" (UniqueName: \"kubernetes.io/projected/2a84e1c9-f761-496c-aae3-6ec403f26898-kube-api-access-q2cvb\") on node \"crc\" DevicePath \"\""
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.296775 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-xfltd-config-vggpl"
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.304880 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-8mgxc"]
Dec 11 08:36:50 crc kubenswrapper[4881]: W1211 08:36:50.325412 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcae2b44c_241b_44ec_b9c5_1cbdddf26008.slice/crio-49f0fccc51467f3fb9e2a6af6e2a22b8b1faadbadbbddd8348d4acf2c5efe089 WatchSource:0}: Error finding container 49f0fccc51467f3fb9e2a6af6e2a22b8b1faadbadbbddd8348d4acf2c5efe089: Status 404 returned error can't find the container with id 49f0fccc51467f3fb9e2a6af6e2a22b8b1faadbadbbddd8348d4acf2c5efe089
Dec 11 08:36:50 crc kubenswrapper[4881]: I1211 08:36:50.783564 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-xfltd-config-vggpl"]
Dec 11 08:36:51 crc kubenswrapper[4881]: I1211 08:36:51.100592 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lx525" event={"ID":"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec","Type":"ContainerStarted","Data":"037dea28846af65979d310752ff1396dc248b72cb1422c85838ffc7bac3b7267"}
Dec 11 08:36:51 crc kubenswrapper[4881]: I1211 08:36:51.105753 4881 generic.go:334] "Generic (PLEG): container finished" podID="2793ce53-41a4-4170-998f-b4ddcd0dbcaa" containerID="4bc0e81d968f9e5cffc363ba7585560c6a7e636d710910c73c29abe205a2c605" exitCode=0
Dec 11 08:36:51 crc kubenswrapper[4881]: I1211 08:36:51.105863 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-s775q" event={"ID":"2793ce53-41a4-4170-998f-b4ddcd0dbcaa","Type":"ContainerDied","Data":"4bc0e81d968f9e5cffc363ba7585560c6a7e636d710910c73c29abe205a2c605"}
Dec 11 08:36:51 crc kubenswrapper[4881]: I1211 08:36:51.111064 4881 generic.go:334] "Generic (PLEG): container finished" podID="cae2b44c-241b-44ec-b9c5-1cbdddf26008" containerID="a6b745bf3fd25c186456f376febb3fbfa8d631d8c3fd90b862f068dce33b7e57" exitCode=0
Dec 11 08:36:51 crc kubenswrapper[4881]: I1211 08:36:51.112001 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-8mgxc" event={"ID":"cae2b44c-241b-44ec-b9c5-1cbdddf26008","Type":"ContainerDied","Data":"a6b745bf3fd25c186456f376febb3fbfa8d631d8c3fd90b862f068dce33b7e57"}
Dec 11 08:36:51 crc kubenswrapper[4881]: I1211 08:36:51.112075 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-8mgxc" event={"ID":"cae2b44c-241b-44ec-b9c5-1cbdddf26008","Type":"ContainerStarted","Data":"49f0fccc51467f3fb9e2a6af6e2a22b8b1faadbadbbddd8348d4acf2c5efe089"}
Dec 11 08:36:51 crc kubenswrapper[4881]: W1211 08:36:51.495149 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb3ccdd2c_fa3e_47c1_ab72_41e4f0b2f4fe.slice/crio-b6bf16f63e81a7fd987371bdf549a0860ef21834ff5b7454b02a21b9b360370d WatchSource:0}: Error finding container b6bf16f63e81a7fd987371bdf549a0860ef21834ff5b7454b02a21b9b360370d: Status 404 returned error can't find the container with id b6bf16f63e81a7fd987371bdf549a0860ef21834ff5b7454b02a21b9b360370d
Dec 11 08:36:52 crc kubenswrapper[4881]: I1211 08:36:52.120639 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xfltd-config-vggpl"
event={"ID":"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe","Type":"ContainerStarted","Data":"b6bf16f63e81a7fd987371bdf549a0860ef21834ff5b7454b02a21b9b360370d"} Dec 11 08:36:56 crc kubenswrapper[4881]: I1211 08:36:56.623203 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-8mgxc" Dec 11 08:36:56 crc kubenswrapper[4881]: I1211 08:36:56.734075 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7lxjp\" (UniqueName: \"kubernetes.io/projected/cae2b44c-241b-44ec-b9c5-1cbdddf26008-kube-api-access-7lxjp\") pod \"cae2b44c-241b-44ec-b9c5-1cbdddf26008\" (UID: \"cae2b44c-241b-44ec-b9c5-1cbdddf26008\") " Dec 11 08:36:56 crc kubenswrapper[4881]: I1211 08:36:56.744756 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cae2b44c-241b-44ec-b9c5-1cbdddf26008-kube-api-access-7lxjp" (OuterVolumeSpecName: "kube-api-access-7lxjp") pod "cae2b44c-241b-44ec-b9c5-1cbdddf26008" (UID: "cae2b44c-241b-44ec-b9c5-1cbdddf26008"). InnerVolumeSpecName "kube-api-access-7lxjp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:36:56 crc kubenswrapper[4881]: I1211 08:36:56.837109 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7lxjp\" (UniqueName: \"kubernetes.io/projected/cae2b44c-241b-44ec-b9c5-1cbdddf26008-kube-api-access-7lxjp\") on node \"crc\" DevicePath \"\"" Dec 11 08:36:57 crc kubenswrapper[4881]: I1211 08:36:57.171532 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-s75l2" event={"ID":"3f4a6ba9-97eb-425b-b2c1-54245c81a6df","Type":"ContainerStarted","Data":"5dff56a1b1956c2b7a9b0e6d40e74ee9c4053f7af8fb5ec838800e0d5bbf7456"} Dec 11 08:36:57 crc kubenswrapper[4881]: I1211 08:36:57.173581 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-s775q" event={"ID":"2793ce53-41a4-4170-998f-b4ddcd0dbcaa","Type":"ContainerStarted","Data":"88e3d3f95b7e7011258611183d8dc80e67bbafc9ef51ce2a0cfde6734d7ee085"} Dec 11 08:36:57 crc kubenswrapper[4881]: I1211 08:36:57.173718 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77585f5f8c-s775q" Dec 11 08:36:57 crc kubenswrapper[4881]: I1211 08:36:57.177423 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1206112a-8438-4dcf-9cad-a3e38790a344","Type":"ContainerStarted","Data":"50a8d3c4e0c0f48ba62514941a8b99252d2a225d63e5471e4187a8166be52156"} Dec 11 08:36:57 crc kubenswrapper[4881]: I1211 08:36:57.179720 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-8mgxc" event={"ID":"cae2b44c-241b-44ec-b9c5-1cbdddf26008","Type":"ContainerDied","Data":"49f0fccc51467f3fb9e2a6af6e2a22b8b1faadbadbbddd8348d4acf2c5efe089"} Dec 11 08:36:57 crc kubenswrapper[4881]: I1211 08:36:57.179769 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49f0fccc51467f3fb9e2a6af6e2a22b8b1faadbadbbddd8348d4acf2c5efe089" Dec 11 08:36:57 crc kubenswrapper[4881]: I1211 08:36:57.179823 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-8mgxc" Dec 11 08:36:57 crc kubenswrapper[4881]: I1211 08:36:57.188736 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-s75l2" podStartSLOduration=2.251576894 podStartE2EDuration="11.18867459s" podCreationTimestamp="2025-12-11 08:36:46 +0000 UTC" firstStartedPulling="2025-12-11 08:36:47.535954559 +0000 UTC m=+1255.913323256" lastFinishedPulling="2025-12-11 08:36:56.473052255 +0000 UTC m=+1264.850420952" observedRunningTime="2025-12-11 08:36:57.185275002 +0000 UTC m=+1265.562643719" watchObservedRunningTime="2025-12-11 08:36:57.18867459 +0000 UTC m=+1265.566043297" Dec 11 08:36:57 crc kubenswrapper[4881]: I1211 08:36:57.189858 4881 generic.go:334] "Generic (PLEG): container finished" podID="b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe" containerID="94c3019cf47f713b5a44bc81dd351be9c696fd6a3b9a6a5da9c12b249515fa87" exitCode=0 Dec 11 08:36:57 crc kubenswrapper[4881]: I1211 08:36:57.190050 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xfltd-config-vggpl" event={"ID":"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe","Type":"ContainerDied","Data":"94c3019cf47f713b5a44bc81dd351be9c696fd6a3b9a6a5da9c12b249515fa87"} Dec 11 08:36:57 crc kubenswrapper[4881]: I1211 08:36:57.210710 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=23.614431586 podStartE2EDuration="1m29.210692283s" podCreationTimestamp="2025-12-11 08:35:28 +0000 UTC" firstStartedPulling="2025-12-11 08:35:50.875613877 +0000 UTC m=+1199.252982574" lastFinishedPulling="2025-12-11 08:36:56.471874574 +0000 UTC m=+1264.849243271" observedRunningTime="2025-12-11 08:36:57.208857805 +0000 UTC m=+1265.586226502" watchObservedRunningTime="2025-12-11 08:36:57.210692283 +0000 UTC m=+1265.588060970" Dec 11 08:36:57 crc kubenswrapper[4881]: I1211 08:36:57.234977 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-77585f5f8c-s775q" podStartSLOduration=10.234954063 podStartE2EDuration="10.234954063s" podCreationTimestamp="2025-12-11 08:36:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:36:57.228790784 +0000 UTC m=+1265.606159471" watchObservedRunningTime="2025-12-11 08:36:57.234954063 +0000 UTC m=+1265.612322760" Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.601530 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-xfltd-config-vggpl" Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.684989 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-additional-scripts\") pod \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.685162 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5v8k\" (UniqueName: \"kubernetes.io/projected/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-kube-api-access-v5v8k\") pod \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.685280 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-run-ovn\") pod \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.685329 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-scripts\") pod \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.685412 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-run\") pod \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.685454 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-log-ovn\") pod \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\" (UID: \"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe\") " Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.685478 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe" (UID: "b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.685897 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe" (UID: "b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe"). InnerVolumeSpecName "additional-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.685916 4881 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.685970 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe" (UID: "b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.686000 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-run" (OuterVolumeSpecName: "var-run") pod "b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe" (UID: "b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.686660 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-scripts" (OuterVolumeSpecName: "scripts") pod "b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe" (UID: "b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.694656 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-kube-api-access-v5v8k" (OuterVolumeSpecName: "kube-api-access-v5v8k") pod "b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe" (UID: "b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe"). InnerVolumeSpecName "kube-api-access-v5v8k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.788468 4881 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-additional-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.788505 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5v8k\" (UniqueName: \"kubernetes.io/projected/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-kube-api-access-v5v8k\") on node \"crc\" DevicePath \"\"" Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.788520 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.788529 4881 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-run\") on node \"crc\" DevicePath \"\"" Dec 11 08:36:58 crc kubenswrapper[4881]: I1211 08:36:58.788538 4881 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe-var-log-ovn\") on node \"crc\" DevicePath \"\"" Dec 11 08:36:59 crc kubenswrapper[4881]: I1211 08:36:59.211583 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xfltd-config-vggpl" event={"ID":"b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe","Type":"ContainerDied","Data":"b6bf16f63e81a7fd987371bdf549a0860ef21834ff5b7454b02a21b9b360370d"} Dec 11 08:36:59 crc kubenswrapper[4881]: I1211 08:36:59.211623 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b6bf16f63e81a7fd987371bdf549a0860ef21834ff5b7454b02a21b9b360370d" Dec 11 08:36:59 crc kubenswrapper[4881]: I1211 08:36:59.211646 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-xfltd-config-vggpl" Dec 11 08:36:59 crc kubenswrapper[4881]: I1211 08:36:59.366257 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-xfltd" Dec 11 08:36:59 crc kubenswrapper[4881]: I1211 08:36:59.397679 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:36:59 crc kubenswrapper[4881]: I1211 08:36:59.397759 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:36:59 crc kubenswrapper[4881]: I1211 08:36:59.397819 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:36:59 crc kubenswrapper[4881]: I1211 08:36:59.398820 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bf6d1efeca37e2539778b6f34be1560f88c12ad867f27057f394c09165f250e9"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 08:36:59 crc kubenswrapper[4881]: I1211 08:36:59.398894 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://bf6d1efeca37e2539778b6f34be1560f88c12ad867f27057f394c09165f250e9" gracePeriod=600 Dec 11 08:36:59 crc kubenswrapper[4881]: I1211 08:36:59.703815 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-xfltd-config-vggpl"] Dec 11 08:36:59 crc kubenswrapper[4881]: I1211 08:36:59.715775 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-xfltd-config-vggpl"] Dec 11 08:37:00 crc kubenswrapper[4881]: I1211 08:37:00.085954 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:00 crc kubenswrapper[4881]: I1211 08:37:00.086217 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:00 crc kubenswrapper[4881]: I1211 08:37:00.091192 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:00 crc kubenswrapper[4881]: I1211 08:37:00.222958 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="bf6d1efeca37e2539778b6f34be1560f88c12ad867f27057f394c09165f250e9" exitCode=0 Dec 11 08:37:00 crc kubenswrapper[4881]: I1211 08:37:00.223045 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"bf6d1efeca37e2539778b6f34be1560f88c12ad867f27057f394c09165f250e9"} Dec 11 08:37:00 crc kubenswrapper[4881]: I1211 
08:37:00.223114 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"a515bd066db37a4e28461e603c6dd047107539c1d10350095f8c9ca546f164e8"} Dec 11 08:37:00 crc kubenswrapper[4881]: I1211 08:37:00.223131 4881 scope.go:117] "RemoveContainer" containerID="46ea07e17db7258f87fdf87e503b03bd1b18ffe826127eac80cec982af4ad0c0" Dec 11 08:37:00 crc kubenswrapper[4881]: I1211 08:37:00.224938 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:01 crc kubenswrapper[4881]: I1211 08:37:01.019786 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe" path="/var/lib/kubelet/pods/b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe/volumes" Dec 11 08:37:01 crc kubenswrapper[4881]: I1211 08:37:01.238874 4881 generic.go:334] "Generic (PLEG): container finished" podID="3f4a6ba9-97eb-425b-b2c1-54245c81a6df" containerID="5dff56a1b1956c2b7a9b0e6d40e74ee9c4053f7af8fb5ec838800e0d5bbf7456" exitCode=0 Dec 11 08:37:01 crc kubenswrapper[4881]: I1211 08:37:01.238961 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-s75l2" event={"ID":"3f4a6ba9-97eb-425b-b2c1-54245c81a6df","Type":"ContainerDied","Data":"5dff56a1b1956c2b7a9b0e6d40e74ee9c4053f7af8fb5ec838800e0d5bbf7456"} Dec 11 08:37:01 crc kubenswrapper[4881]: I1211 08:37:01.402436 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.645975 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.646688 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="1206112a-8438-4dcf-9cad-a3e38790a344" containerName="prometheus" containerID="cri-o://ba6e0d3779b0e6c9e61ddaa181461d7ea87ee57213fe6efacda7acba5a4d1228" gracePeriod=600 Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.646743 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="1206112a-8438-4dcf-9cad-a3e38790a344" containerName="thanos-sidecar" containerID="cri-o://50a8d3c4e0c0f48ba62514941a8b99252d2a225d63e5471e4187a8166be52156" gracePeriod=600 Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.646823 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="1206112a-8438-4dcf-9cad-a3e38790a344" containerName="config-reloader" containerID="cri-o://7bb4b7a05a63591d49e144e279752306ea71b1cb98919777df69c105fc734a0e" gracePeriod=600 Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.771272 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-77585f5f8c-s775q" Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.859047 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-mrlfq"] Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.859515 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-mrlfq" podUID="affe9e76-f06d-45d9-93f1-7f00db52c82d" containerName="dnsmasq-dns" 
containerID="cri-o://bb06310dc7df1e3b19e56273aa9d843f75d5abd6c1956b6aebf39d0899fa3217" gracePeriod=10 Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.977084 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-1cc4-account-create-mq9q7"] Dec 11 08:37:02 crc kubenswrapper[4881]: E1211 08:37:02.977592 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a84e1c9-f761-496c-aae3-6ec403f26898" containerName="mariadb-database-create" Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.977609 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a84e1c9-f761-496c-aae3-6ec403f26898" containerName="mariadb-database-create" Dec 11 08:37:02 crc kubenswrapper[4881]: E1211 08:37:02.977620 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cae2b44c-241b-44ec-b9c5-1cbdddf26008" containerName="mariadb-database-create" Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.977627 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="cae2b44c-241b-44ec-b9c5-1cbdddf26008" containerName="mariadb-database-create" Dec 11 08:37:02 crc kubenswrapper[4881]: E1211 08:37:02.977647 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3b36526-9590-49d0-9b9a-0b86736538bb" containerName="mariadb-database-create" Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.977655 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3b36526-9590-49d0-9b9a-0b86736538bb" containerName="mariadb-database-create" Dec 11 08:37:02 crc kubenswrapper[4881]: E1211 08:37:02.977671 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe" containerName="ovn-config" Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.977678 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe" containerName="ovn-config" Dec 11 08:37:02 crc kubenswrapper[4881]: E1211 08:37:02.977699 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e6b6074-f624-4016-beb9-a926af51e986" containerName="mariadb-database-create" Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.977707 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e6b6074-f624-4016-beb9-a926af51e986" containerName="mariadb-database-create" Dec 11 08:37:02 crc kubenswrapper[4881]: E1211 08:37:02.977743 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30f00215-2531-44c8-80d6-7f3be540e71b" containerName="mariadb-database-create" Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.977751 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="30f00215-2531-44c8-80d6-7f3be540e71b" containerName="mariadb-database-create" Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.977958 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a84e1c9-f761-496c-aae3-6ec403f26898" containerName="mariadb-database-create" Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.977971 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="30f00215-2531-44c8-80d6-7f3be540e71b" containerName="mariadb-database-create" Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.977986 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3b36526-9590-49d0-9b9a-0b86736538bb" containerName="mariadb-database-create" Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.978926 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3ccdd2c-fa3e-47c1-ab72-41e4f0b2f4fe" containerName="ovn-config" 
Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.978951 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e6b6074-f624-4016-beb9-a926af51e986" containerName="mariadb-database-create" Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.978971 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="cae2b44c-241b-44ec-b9c5-1cbdddf26008" containerName="mariadb-database-create" Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.979990 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1cc4-account-create-mq9q7" Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.982821 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Dec 11 08:37:02 crc kubenswrapper[4881]: I1211 08:37:02.999106 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-1cc4-account-create-mq9q7"] Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.110227 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n45bx\" (UniqueName: \"kubernetes.io/projected/33f9ffca-ca21-41c8-b52e-5a5c7c786f4d-kube-api-access-n45bx\") pod \"barbican-1cc4-account-create-mq9q7\" (UID: \"33f9ffca-ca21-41c8-b52e-5a5c7c786f4d\") " pod="openstack/barbican-1cc4-account-create-mq9q7" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.111809 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-c7b6-account-create-sbjdl"] Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.116763 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c7b6-account-create-sbjdl" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.124828 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.132098 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-c7b6-account-create-sbjdl"] Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.214042 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n45bx\" (UniqueName: \"kubernetes.io/projected/33f9ffca-ca21-41c8-b52e-5a5c7c786f4d-kube-api-access-n45bx\") pod \"barbican-1cc4-account-create-mq9q7\" (UID: \"33f9ffca-ca21-41c8-b52e-5a5c7c786f4d\") " pod="openstack/barbican-1cc4-account-create-mq9q7" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.214234 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gs6tc\" (UniqueName: \"kubernetes.io/projected/be7726a5-588c-4057-823d-6a6c51348f2c-kube-api-access-gs6tc\") pod \"cinder-c7b6-account-create-sbjdl\" (UID: \"be7726a5-588c-4057-823d-6a6c51348f2c\") " pod="openstack/cinder-c7b6-account-create-sbjdl" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.232201 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n45bx\" (UniqueName: \"kubernetes.io/projected/33f9ffca-ca21-41c8-b52e-5a5c7c786f4d-kube-api-access-n45bx\") pod \"barbican-1cc4-account-create-mq9q7\" (UID: \"33f9ffca-ca21-41c8-b52e-5a5c7c786f4d\") " pod="openstack/barbican-1cc4-account-create-mq9q7" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.282281 4881 generic.go:334] "Generic (PLEG): container finished" podID="affe9e76-f06d-45d9-93f1-7f00db52c82d" 
containerID="bb06310dc7df1e3b19e56273aa9d843f75d5abd6c1956b6aebf39d0899fa3217" exitCode=0 Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.282366 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-mrlfq" event={"ID":"affe9e76-f06d-45d9-93f1-7f00db52c82d","Type":"ContainerDied","Data":"bb06310dc7df1e3b19e56273aa9d843f75d5abd6c1956b6aebf39d0899fa3217"} Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.285516 4881 generic.go:334] "Generic (PLEG): container finished" podID="1206112a-8438-4dcf-9cad-a3e38790a344" containerID="50a8d3c4e0c0f48ba62514941a8b99252d2a225d63e5471e4187a8166be52156" exitCode=0 Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.285543 4881 generic.go:334] "Generic (PLEG): container finished" podID="1206112a-8438-4dcf-9cad-a3e38790a344" containerID="7bb4b7a05a63591d49e144e279752306ea71b1cb98919777df69c105fc734a0e" exitCode=0 Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.285550 4881 generic.go:334] "Generic (PLEG): container finished" podID="1206112a-8438-4dcf-9cad-a3e38790a344" containerID="ba6e0d3779b0e6c9e61ddaa181461d7ea87ee57213fe6efacda7acba5a4d1228" exitCode=0 Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.285567 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1206112a-8438-4dcf-9cad-a3e38790a344","Type":"ContainerDied","Data":"50a8d3c4e0c0f48ba62514941a8b99252d2a225d63e5471e4187a8166be52156"} Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.285591 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1206112a-8438-4dcf-9cad-a3e38790a344","Type":"ContainerDied","Data":"7bb4b7a05a63591d49e144e279752306ea71b1cb98919777df69c105fc734a0e"} Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.285600 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1206112a-8438-4dcf-9cad-a3e38790a344","Type":"ContainerDied","Data":"ba6e0d3779b0e6c9e61ddaa181461d7ea87ee57213fe6efacda7acba5a4d1228"} Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.316672 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gs6tc\" (UniqueName: \"kubernetes.io/projected/be7726a5-588c-4057-823d-6a6c51348f2c-kube-api-access-gs6tc\") pod \"cinder-c7b6-account-create-sbjdl\" (UID: \"be7726a5-588c-4057-823d-6a6c51348f2c\") " pod="openstack/cinder-c7b6-account-create-sbjdl" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.339214 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gs6tc\" (UniqueName: \"kubernetes.io/projected/be7726a5-588c-4057-823d-6a6c51348f2c-kube-api-access-gs6tc\") pod \"cinder-c7b6-account-create-sbjdl\" (UID: \"be7726a5-588c-4057-823d-6a6c51348f2c\") " pod="openstack/cinder-c7b6-account-create-sbjdl" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.383192 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-6956-account-create-xhfnq"] Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.384632 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-6956-account-create-xhfnq" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.387021 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.404057 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-1cc4-account-create-mq9q7" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.408796 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-6956-account-create-xhfnq"] Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.454558 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c7b6-account-create-sbjdl" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.523928 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4bsr\" (UniqueName: \"kubernetes.io/projected/2fbc6a44-ebbf-4a77-bad1-53d78b09d292-kube-api-access-q4bsr\") pod \"heat-6956-account-create-xhfnq\" (UID: \"2fbc6a44-ebbf-4a77-bad1-53d78b09d292\") " pod="openstack/heat-6956-account-create-xhfnq" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.580279 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-95cf-account-create-9q62w"] Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.581815 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-95cf-account-create-9q62w" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.585629 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.603644 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-95cf-account-create-9q62w"] Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.625980 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4bsr\" (UniqueName: \"kubernetes.io/projected/2fbc6a44-ebbf-4a77-bad1-53d78b09d292-kube-api-access-q4bsr\") pod \"heat-6956-account-create-xhfnq\" (UID: \"2fbc6a44-ebbf-4a77-bad1-53d78b09d292\") " pod="openstack/heat-6956-account-create-xhfnq" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.641416 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4bsr\" (UniqueName: \"kubernetes.io/projected/2fbc6a44-ebbf-4a77-bad1-53d78b09d292-kube-api-access-q4bsr\") pod \"heat-6956-account-create-xhfnq\" (UID: \"2fbc6a44-ebbf-4a77-bad1-53d78b09d292\") " pod="openstack/heat-6956-account-create-xhfnq" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.717988 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-6956-account-create-xhfnq" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.727871 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdbsq\" (UniqueName: \"kubernetes.io/projected/11fc6ffa-3c79-4409-b646-1e658f40120e-kube-api-access-wdbsq\") pod \"neutron-95cf-account-create-9q62w\" (UID: \"11fc6ffa-3c79-4409-b646-1e658f40120e\") " pod="openstack/neutron-95cf-account-create-9q62w" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.830455 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdbsq\" (UniqueName: \"kubernetes.io/projected/11fc6ffa-3c79-4409-b646-1e658f40120e-kube-api-access-wdbsq\") pod \"neutron-95cf-account-create-9q62w\" (UID: \"11fc6ffa-3c79-4409-b646-1e658f40120e\") " pod="openstack/neutron-95cf-account-create-9q62w" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.872035 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdbsq\" (UniqueName: \"kubernetes.io/projected/11fc6ffa-3c79-4409-b646-1e658f40120e-kube-api-access-wdbsq\") pod \"neutron-95cf-account-create-9q62w\" (UID: \"11fc6ffa-3c79-4409-b646-1e658f40120e\") " pod="openstack/neutron-95cf-account-create-9q62w" Dec 11 08:37:03 crc kubenswrapper[4881]: I1211 08:37:03.915359 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-95cf-account-create-9q62w" Dec 11 08:37:05 crc kubenswrapper[4881]: I1211 08:37:05.086316 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="1206112a-8438-4dcf-9cad-a3e38790a344" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.0.136:9090/-/ready\": dial tcp 10.217.0.136:9090: connect: connection refused" Dec 11 08:37:05 crc kubenswrapper[4881]: I1211 08:37:05.998774 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-mrlfq" podUID="affe9e76-f06d-45d9-93f1-7f00db52c82d" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.147:5353: connect: connection refused" Dec 11 08:37:08 crc kubenswrapper[4881]: I1211 08:37:08.797177 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-dd28-account-create-v5xvr"] Dec 11 08:37:08 crc kubenswrapper[4881]: I1211 08:37:08.799385 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-dd28-account-create-v5xvr" Dec 11 08:37:08 crc kubenswrapper[4881]: I1211 08:37:08.802454 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-cell1-db-secret" Dec 11 08:37:08 crc kubenswrapper[4881]: I1211 08:37:08.823701 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-dd28-account-create-v5xvr"] Dec 11 08:37:08 crc kubenswrapper[4881]: I1211 08:37:08.954724 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-924sz\" (UniqueName: \"kubernetes.io/projected/0f3dbda2-e652-483b-9ce4-d3bf05516f7b-kube-api-access-924sz\") pod \"mysqld-exporter-dd28-account-create-v5xvr\" (UID: \"0f3dbda2-e652-483b-9ce4-d3bf05516f7b\") " pod="openstack/mysqld-exporter-dd28-account-create-v5xvr" Dec 11 08:37:09 crc kubenswrapper[4881]: I1211 08:37:09.057658 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-924sz\" (UniqueName: \"kubernetes.io/projected/0f3dbda2-e652-483b-9ce4-d3bf05516f7b-kube-api-access-924sz\") pod \"mysqld-exporter-dd28-account-create-v5xvr\" (UID: \"0f3dbda2-e652-483b-9ce4-d3bf05516f7b\") " pod="openstack/mysqld-exporter-dd28-account-create-v5xvr" Dec 11 08:37:09 crc kubenswrapper[4881]: I1211 08:37:09.077630 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-924sz\" (UniqueName: \"kubernetes.io/projected/0f3dbda2-e652-483b-9ce4-d3bf05516f7b-kube-api-access-924sz\") pod \"mysqld-exporter-dd28-account-create-v5xvr\" (UID: \"0f3dbda2-e652-483b-9ce4-d3bf05516f7b\") " pod="openstack/mysqld-exporter-dd28-account-create-v5xvr" Dec 11 08:37:09 crc kubenswrapper[4881]: I1211 08:37:09.126194 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-dd28-account-create-v5xvr" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.086630 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="1206112a-8438-4dcf-9cad-a3e38790a344" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.0.136:9090/-/ready\": dial tcp 10.217.0.136:9090: connect: connection refused" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.517323 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-s75l2" event={"ID":"3f4a6ba9-97eb-425b-b2c1-54245c81a6df","Type":"ContainerDied","Data":"e6df2d20d903b5d776bf0cbfcbf505ff43d8953c047cab3f2b19ba8faf458b1b"} Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.517393 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e6df2d20d903b5d776bf0cbfcbf505ff43d8953c047cab3f2b19ba8faf458b1b" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.517558 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-s75l2" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.589757 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7wh44\" (UniqueName: \"kubernetes.io/projected/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-kube-api-access-7wh44\") pod \"3f4a6ba9-97eb-425b-b2c1-54245c81a6df\" (UID: \"3f4a6ba9-97eb-425b-b2c1-54245c81a6df\") " Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.591156 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-combined-ca-bundle\") pod \"3f4a6ba9-97eb-425b-b2c1-54245c81a6df\" (UID: \"3f4a6ba9-97eb-425b-b2c1-54245c81a6df\") " Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.591292 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-config-data\") pod \"3f4a6ba9-97eb-425b-b2c1-54245c81a6df\" (UID: \"3f4a6ba9-97eb-425b-b2c1-54245c81a6df\") " Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.603858 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-kube-api-access-7wh44" (OuterVolumeSpecName: "kube-api-access-7wh44") pod "3f4a6ba9-97eb-425b-b2c1-54245c81a6df" (UID: "3f4a6ba9-97eb-425b-b2c1-54245c81a6df"). InnerVolumeSpecName "kube-api-access-7wh44". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.645227 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f4a6ba9-97eb-425b-b2c1-54245c81a6df" (UID: "3f4a6ba9-97eb-425b-b2c1-54245c81a6df"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.675314 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-config-data" (OuterVolumeSpecName: "config-data") pod "3f4a6ba9-97eb-425b-b2c1-54245c81a6df" (UID: "3f4a6ba9-97eb-425b-b2c1-54245c81a6df"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.693652 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-mrlfq" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.695148 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.695202 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.695214 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7wh44\" (UniqueName: \"kubernetes.io/projected/3f4a6ba9-97eb-425b-b2c1-54245c81a6df-kube-api-access-7wh44\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.796464 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-ovsdbserver-sb\") pod \"affe9e76-f06d-45d9-93f1-7f00db52c82d\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.796554 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-dns-svc\") pod \"affe9e76-f06d-45d9-93f1-7f00db52c82d\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.796633 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-config\") pod \"affe9e76-f06d-45d9-93f1-7f00db52c82d\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.796655 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vj6vb\" (UniqueName: \"kubernetes.io/projected/affe9e76-f06d-45d9-93f1-7f00db52c82d-kube-api-access-vj6vb\") pod \"affe9e76-f06d-45d9-93f1-7f00db52c82d\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.796822 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-ovsdbserver-nb\") pod \"affe9e76-f06d-45d9-93f1-7f00db52c82d\" (UID: \"affe9e76-f06d-45d9-93f1-7f00db52c82d\") " Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.814917 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/affe9e76-f06d-45d9-93f1-7f00db52c82d-kube-api-access-vj6vb" (OuterVolumeSpecName: "kube-api-access-vj6vb") pod "affe9e76-f06d-45d9-93f1-7f00db52c82d" (UID: "affe9e76-f06d-45d9-93f1-7f00db52c82d"). InnerVolumeSpecName "kube-api-access-vj6vb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.861422 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "affe9e76-f06d-45d9-93f1-7f00db52c82d" (UID: "affe9e76-f06d-45d9-93f1-7f00db52c82d"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.861438 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-config" (OuterVolumeSpecName: "config") pod "affe9e76-f06d-45d9-93f1-7f00db52c82d" (UID: "affe9e76-f06d-45d9-93f1-7f00db52c82d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.866285 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "affe9e76-f06d-45d9-93f1-7f00db52c82d" (UID: "affe9e76-f06d-45d9-93f1-7f00db52c82d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.886045 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "affe9e76-f06d-45d9-93f1-7f00db52c82d" (UID: "affe9e76-f06d-45d9-93f1-7f00db52c82d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.902825 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.902857 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.902867 4881 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.902884 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/affe9e76-f06d-45d9-93f1-7f00db52c82d-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.902894 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vj6vb\" (UniqueName: \"kubernetes.io/projected/affe9e76-f06d-45d9-93f1-7f00db52c82d-kube-api-access-vj6vb\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:10 crc kubenswrapper[4881]: I1211 08:37:10.937881 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.004131 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"1206112a-8438-4dcf-9cad-a3e38790a344\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.004190 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmn5j\" (UniqueName: \"kubernetes.io/projected/1206112a-8438-4dcf-9cad-a3e38790a344-kube-api-access-rmn5j\") pod \"1206112a-8438-4dcf-9cad-a3e38790a344\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.004227 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-web-config\") pod \"1206112a-8438-4dcf-9cad-a3e38790a344\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.004246 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-thanos-prometheus-http-client-file\") pod \"1206112a-8438-4dcf-9cad-a3e38790a344\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.004326 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/1206112a-8438-4dcf-9cad-a3e38790a344-tls-assets\") pod \"1206112a-8438-4dcf-9cad-a3e38790a344\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.004385 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/1206112a-8438-4dcf-9cad-a3e38790a344-prometheus-metric-storage-rulefiles-0\") pod \"1206112a-8438-4dcf-9cad-a3e38790a344\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.004528 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/1206112a-8438-4dcf-9cad-a3e38790a344-config-out\") pod \"1206112a-8438-4dcf-9cad-a3e38790a344\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.004614 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-config\") pod \"1206112a-8438-4dcf-9cad-a3e38790a344\" (UID: \"1206112a-8438-4dcf-9cad-a3e38790a344\") " Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.005828 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1206112a-8438-4dcf-9cad-a3e38790a344-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "1206112a-8438-4dcf-9cad-a3e38790a344" (UID: "1206112a-8438-4dcf-9cad-a3e38790a344"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.010106 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1206112a-8438-4dcf-9cad-a3e38790a344-kube-api-access-rmn5j" (OuterVolumeSpecName: "kube-api-access-rmn5j") pod "1206112a-8438-4dcf-9cad-a3e38790a344" (UID: "1206112a-8438-4dcf-9cad-a3e38790a344"). InnerVolumeSpecName "kube-api-access-rmn5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.026224 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1206112a-8438-4dcf-9cad-a3e38790a344-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "1206112a-8438-4dcf-9cad-a3e38790a344" (UID: "1206112a-8438-4dcf-9cad-a3e38790a344"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.026384 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1206112a-8438-4dcf-9cad-a3e38790a344-config-out" (OuterVolumeSpecName: "config-out") pod "1206112a-8438-4dcf-9cad-a3e38790a344" (UID: "1206112a-8438-4dcf-9cad-a3e38790a344"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.026400 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "1206112a-8438-4dcf-9cad-a3e38790a344" (UID: "1206112a-8438-4dcf-9cad-a3e38790a344"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.027830 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "1206112a-8438-4dcf-9cad-a3e38790a344" (UID: "1206112a-8438-4dcf-9cad-a3e38790a344"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.031881 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-config" (OuterVolumeSpecName: "config") pod "1206112a-8438-4dcf-9cad-a3e38790a344" (UID: "1206112a-8438-4dcf-9cad-a3e38790a344"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.067806 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-web-config" (OuterVolumeSpecName: "web-config") pod "1206112a-8438-4dcf-9cad-a3e38790a344" (UID: "1206112a-8438-4dcf-9cad-a3e38790a344"). InnerVolumeSpecName "web-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.110279 4881 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/1206112a-8438-4dcf-9cad-a3e38790a344-config-out\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.110328 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.110368 4881 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.110380 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmn5j\" (UniqueName: \"kubernetes.io/projected/1206112a-8438-4dcf-9cad-a3e38790a344-kube-api-access-rmn5j\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.110417 4881 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-web-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.110427 4881 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/1206112a-8438-4dcf-9cad-a3e38790a344-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.110436 4881 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/1206112a-8438-4dcf-9cad-a3e38790a344-tls-assets\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.110446 4881 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/1206112a-8438-4dcf-9cad-a3e38790a344-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.155414 4881 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.211772 4881 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.387648 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-dd28-account-create-v5xvr"] Dec 11 08:37:11 crc kubenswrapper[4881]: W1211 08:37:11.388921 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0f3dbda2_e652_483b_9ce4_d3bf05516f7b.slice/crio-5e91098bef8cf1d3c128d1060cb453a084f991c9b0c59b463c24f2e85f939f2a WatchSource:0}: Error finding container 5e91098bef8cf1d3c128d1060cb453a084f991c9b0c59b463c24f2e85f939f2a: Status 404 returned error can't find the container with id 5e91098bef8cf1d3c128d1060cb453a084f991c9b0c59b463c24f2e85f939f2a Dec 11 08:37:11 crc kubenswrapper[4881]: W1211 08:37:11.392144 4881 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod11fc6ffa_3c79_4409_b646_1e658f40120e.slice/crio-ac03fcf64e576168832b8c29f176fb1176da71a040d1c13da31cdb206e6e824e WatchSource:0}: Error finding container ac03fcf64e576168832b8c29f176fb1176da71a040d1c13da31cdb206e6e824e: Status 404 returned error can't find the container with id ac03fcf64e576168832b8c29f176fb1176da71a040d1c13da31cdb206e6e824e Dec 11 08:37:11 crc kubenswrapper[4881]: W1211 08:37:11.392613 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe7726a5_588c_4057_823d_6a6c51348f2c.slice/crio-41005fc0b83645ae9deb71ad67e02654eb753c9df2de4d563644f8fc35005384 WatchSource:0}: Error finding container 41005fc0b83645ae9deb71ad67e02654eb753c9df2de4d563644f8fc35005384: Status 404 returned error can't find the container with id 41005fc0b83645ae9deb71ad67e02654eb753c9df2de4d563644f8fc35005384 Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.414917 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-c7b6-account-create-sbjdl"] Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.427286 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-95cf-account-create-9q62w"] Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.438967 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-1cc4-account-create-mq9q7"] Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.447979 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-6956-account-create-xhfnq"] Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.530319 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-mrlfq" event={"ID":"affe9e76-f06d-45d9-93f1-7f00db52c82d","Type":"ContainerDied","Data":"57dd363b3b978ebe185b042faa69d0e446a8668c24289933bf5c8c69bd1efd93"} Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.530408 4881 scope.go:117] "RemoveContainer" containerID="bb06310dc7df1e3b19e56273aa9d843f75d5abd6c1956b6aebf39d0899fa3217" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.530722 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-mrlfq" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.531629 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-dd28-account-create-v5xvr" event={"ID":"0f3dbda2-e652-483b-9ce4-d3bf05516f7b","Type":"ContainerStarted","Data":"5e91098bef8cf1d3c128d1060cb453a084f991c9b0c59b463c24f2e85f939f2a"} Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.548578 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c7b6-account-create-sbjdl" event={"ID":"be7726a5-588c-4057-823d-6a6c51348f2c","Type":"ContainerStarted","Data":"41005fc0b83645ae9deb71ad67e02654eb753c9df2de4d563644f8fc35005384"} Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.569438 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-6956-account-create-xhfnq" event={"ID":"2fbc6a44-ebbf-4a77-bad1-53d78b09d292","Type":"ContainerStarted","Data":"2fd6fe0ba9894f8b4599a604b924dff425f239e57e64f74662a5db9187232103"} Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.576393 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lx525" event={"ID":"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec","Type":"ContainerStarted","Data":"848fe4d3938e93638f3177a44497a800500651736f73ccaf195458f2ef728205"} Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.579691 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-95cf-account-create-9q62w" event={"ID":"11fc6ffa-3c79-4409-b646-1e658f40120e","Type":"ContainerStarted","Data":"ac03fcf64e576168832b8c29f176fb1176da71a040d1c13da31cdb206e6e824e"} Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.584302 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1206112a-8438-4dcf-9cad-a3e38790a344","Type":"ContainerDied","Data":"18c790be2f3607b53bfd255854e482425c9db1653791bde5a4363dc5669d3d3b"} Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.584409 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.585413 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-s75l2" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.586217 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1cc4-account-create-mq9q7" event={"ID":"33f9ffca-ca21-41c8-b52e-5a5c7c786f4d","Type":"ContainerStarted","Data":"1ca8b2f5270ae106d25b30e57f293ddccf6e98ca38c918d291884a2d4fb7c4c6"} Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.586542 4881 scope.go:117] "RemoveContainer" containerID="4aa7ebcf4c4847b75bbf85fad95bf72a0d1ec3c082947285051dedd0d1e05fef" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.621622 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-lx525" podStartSLOduration=4.202687151 podStartE2EDuration="24.621594842s" podCreationTimestamp="2025-12-11 08:36:47 +0000 UTC" firstStartedPulling="2025-12-11 08:36:50.094013666 +0000 UTC m=+1258.471382363" lastFinishedPulling="2025-12-11 08:37:10.512921357 +0000 UTC m=+1278.890290054" observedRunningTime="2025-12-11 08:37:11.595953495 +0000 UTC m=+1279.973322192" watchObservedRunningTime="2025-12-11 08:37:11.621594842 +0000 UTC m=+1279.998963529" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.649307 4881 scope.go:117] "RemoveContainer" containerID="50a8d3c4e0c0f48ba62514941a8b99252d2a225d63e5471e4187a8166be52156" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.655274 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-mrlfq"] Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.669620 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-mrlfq"] Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.681883 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.706684 4881 scope.go:117] "RemoveContainer" containerID="7bb4b7a05a63591d49e144e279752306ea71b1cb98919777df69c105fc734a0e" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.726555 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.751676 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 11 08:37:11 crc kubenswrapper[4881]: E1211 08:37:11.752308 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="affe9e76-f06d-45d9-93f1-7f00db52c82d" containerName="init" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.752349 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="affe9e76-f06d-45d9-93f1-7f00db52c82d" containerName="init" Dec 11 08:37:11 crc kubenswrapper[4881]: E1211 08:37:11.752388 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1206112a-8438-4dcf-9cad-a3e38790a344" containerName="thanos-sidecar" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.752397 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="1206112a-8438-4dcf-9cad-a3e38790a344" containerName="thanos-sidecar" Dec 11 08:37:11 crc kubenswrapper[4881]: E1211 08:37:11.752407 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f4a6ba9-97eb-425b-b2c1-54245c81a6df" containerName="keystone-db-sync" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.752417 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f4a6ba9-97eb-425b-b2c1-54245c81a6df" containerName="keystone-db-sync" Dec 11 08:37:11 crc 
kubenswrapper[4881]: E1211 08:37:11.752440 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1206112a-8438-4dcf-9cad-a3e38790a344" containerName="config-reloader" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.752448 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="1206112a-8438-4dcf-9cad-a3e38790a344" containerName="config-reloader" Dec 11 08:37:11 crc kubenswrapper[4881]: E1211 08:37:11.752469 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1206112a-8438-4dcf-9cad-a3e38790a344" containerName="prometheus" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.752477 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="1206112a-8438-4dcf-9cad-a3e38790a344" containerName="prometheus" Dec 11 08:37:11 crc kubenswrapper[4881]: E1211 08:37:11.752491 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1206112a-8438-4dcf-9cad-a3e38790a344" containerName="init-config-reloader" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.752498 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="1206112a-8438-4dcf-9cad-a3e38790a344" containerName="init-config-reloader" Dec 11 08:37:11 crc kubenswrapper[4881]: E1211 08:37:11.752516 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="affe9e76-f06d-45d9-93f1-7f00db52c82d" containerName="dnsmasq-dns" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.752524 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="affe9e76-f06d-45d9-93f1-7f00db52c82d" containerName="dnsmasq-dns" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.752806 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="1206112a-8438-4dcf-9cad-a3e38790a344" containerName="config-reloader" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.752829 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="1206112a-8438-4dcf-9cad-a3e38790a344" containerName="thanos-sidecar" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.752849 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="1206112a-8438-4dcf-9cad-a3e38790a344" containerName="prometheus" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.752866 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="affe9e76-f06d-45d9-93f1-7f00db52c82d" containerName="dnsmasq-dns" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.752877 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f4a6ba9-97eb-425b-b2c1-54245c81a6df" containerName="keystone-db-sync" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.755787 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.760967 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.761022 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.761166 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.760951 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-nb9sv" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.761909 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.762117 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.765370 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.791957 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.821604 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-5s4v5"] Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.823608 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.838848 4881 scope.go:117] "RemoveContainer" containerID="ba6e0d3779b0e6c9e61ddaa181461d7ea87ee57213fe6efacda7acba5a4d1228" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.902493 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-5s4v5"] Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.917303 4881 scope.go:117] "RemoveContainer" containerID="aacb4d3d3e7abb2a614eb9a8d21b36b2a3df4e493512d61d72fa8e2f8536328b" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.931362 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-ovsdbserver-nb\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.931476 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b515a685-da3e-4d92-a8e5-60561e9de83f-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.931547 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.931594 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-config\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.931626 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b515a685-da3e-4d92-a8e5-60561e9de83f-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.931685 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-config\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.931775 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.931849 4881 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-ovsdbserver-sb\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.931929 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-dns-svc\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.931979 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.932064 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.932143 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.932212 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgvwn\" (UniqueName: \"kubernetes.io/projected/b515a685-da3e-4d92-a8e5-60561e9de83f-kube-api-access-hgvwn\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.932271 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.932319 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmgbw\" (UniqueName: \"kubernetes.io/projected/aa55caf4-597c-4e33-8c51-b14753998ad2-kube-api-access-rmgbw\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.932563 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-dns-swift-storage-0\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.932617 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b515a685-da3e-4d92-a8e5-60561e9de83f-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.934927 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-vrsnd"] Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.936967 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.945622 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.947919 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-vrsnd"] Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.945865 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.945902 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 11 08:37:11 crc kubenswrapper[4881]: I1211 08:37:11.948447 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-9vkph" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.034767 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.035375 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-ovsdbserver-sb\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.035492 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghrkf\" (UniqueName: \"kubernetes.io/projected/09761130-7785-4617-b078-ea94c099676a-kube-api-access-ghrkf\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.035606 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-dns-svc\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.035709 4881 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.035836 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.036731 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.036827 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-credential-keys\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.037492 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgvwn\" (UniqueName: \"kubernetes.io/projected/b515a685-da3e-4d92-a8e5-60561e9de83f-kube-api-access-hgvwn\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.037535 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.037607 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmgbw\" (UniqueName: \"kubernetes.io/projected/aa55caf4-597c-4e33-8c51-b14753998ad2-kube-api-access-rmgbw\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.037630 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-dns-swift-storage-0\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.037696 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b515a685-da3e-4d92-a8e5-60561e9de83f-prometheus-metric-storage-rulefiles-0\") pod 
\"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.037728 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-scripts\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.037872 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-config-data\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.037918 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-ovsdbserver-nb\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.037959 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b515a685-da3e-4d92-a8e5-60561e9de83f-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.038068 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.038136 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-config\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.038177 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b515a685-da3e-4d92-a8e5-60561e9de83f-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.038271 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-combined-ca-bundle\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.038305 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-config\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " 
pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.038356 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-fernet-keys\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.038682 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-dns-svc\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.038681 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-ovsdbserver-sb\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.038923 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-dns-swift-storage-0\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.039084 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.039257 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b515a685-da3e-4d92-a8e5-60561e9de83f-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.045658 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-config\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.048047 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-ovsdbserver-nb\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.048476 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 
08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.051065 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.052224 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.053818 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-config\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.057041 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.058419 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b515a685-da3e-4d92-a8e5-60561e9de83f-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.059899 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b515a685-da3e-4d92-a8e5-60561e9de83f-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.068525 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgvwn\" (UniqueName: \"kubernetes.io/projected/b515a685-da3e-4d92-a8e5-60561e9de83f-kube-api-access-hgvwn\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.070185 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmgbw\" (UniqueName: \"kubernetes.io/projected/aa55caf4-597c-4e33-8c51-b14753998ad2-kube-api-access-rmgbw\") pod \"dnsmasq-dns-55fff446b9-5s4v5\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.078084 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b515a685-da3e-4d92-a8e5-60561e9de83f-web-config\") pod \"prometheus-metric-storage-0\" (UID: 
\"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.140771 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-combined-ca-bundle\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.140834 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-fernet-keys\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.140898 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghrkf\" (UniqueName: \"kubernetes.io/projected/09761130-7785-4617-b078-ea94c099676a-kube-api-access-ghrkf\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.140989 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-credential-keys\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.141065 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-scripts\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.141119 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-config-data\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.146700 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-scripts\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.148324 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-credential-keys\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.148814 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-config-data\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.160566 4881 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.174709 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-fernet-keys\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.174941 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghrkf\" (UniqueName: \"kubernetes.io/projected/09761130-7785-4617-b078-ea94c099676a-kube-api-access-ghrkf\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.175061 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-combined-ca-bundle\") pod \"keystone-bootstrap-vrsnd\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") " pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.197895 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"prometheus-metric-storage-0\" (UID: \"b515a685-da3e-4d92-a8e5-60561e9de83f\") " pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.240133 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-5s4v5"] Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.279367 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-628mp"] Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.280045 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-vrsnd" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.281814 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.291858 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-cb9l4"] Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.293787 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.301978 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-mm49w" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.302195 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.302366 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.340322 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-628mp"] Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.350597 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-cb9l4"] Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.390939 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.449742 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-dns-swift-storage-0\") pod \"dnsmasq-dns-76fcf4b695-628mp\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.449800 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-dns-svc\") pod \"dnsmasq-dns-76fcf4b695-628mp\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.449825 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-scripts\") pod \"placement-db-sync-cb9l4\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.449882 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzxn4\" (UniqueName: \"kubernetes.io/projected/4406be5c-0e4c-40ff-ac0f-4da87b36b145-kube-api-access-qzxn4\") pod \"placement-db-sync-cb9l4\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.449955 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-combined-ca-bundle\") pod \"placement-db-sync-cb9l4\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.449972 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz9lz\" (UniqueName: \"kubernetes.io/projected/4414ed0e-d99f-4f9b-8888-e73cf1d47834-kube-api-access-lz9lz\") pod \"dnsmasq-dns-76fcf4b695-628mp\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.450100 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4406be5c-0e4c-40ff-ac0f-4da87b36b145-logs\") pod \"placement-db-sync-cb9l4\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.450156 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-config-data\") pod \"placement-db-sync-cb9l4\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.450265 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-ovsdbserver-nb\") pod 
\"dnsmasq-dns-76fcf4b695-628mp\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.450507 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-ovsdbserver-sb\") pod \"dnsmasq-dns-76fcf4b695-628mp\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.450635 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-config\") pod \"dnsmasq-dns-76fcf4b695-628mp\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.554203 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzxn4\" (UniqueName: \"kubernetes.io/projected/4406be5c-0e4c-40ff-ac0f-4da87b36b145-kube-api-access-qzxn4\") pod \"placement-db-sync-cb9l4\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.554702 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-combined-ca-bundle\") pod \"placement-db-sync-cb9l4\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.554727 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz9lz\" (UniqueName: \"kubernetes.io/projected/4414ed0e-d99f-4f9b-8888-e73cf1d47834-kube-api-access-lz9lz\") pod \"dnsmasq-dns-76fcf4b695-628mp\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.554790 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4406be5c-0e4c-40ff-ac0f-4da87b36b145-logs\") pod \"placement-db-sync-cb9l4\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.554819 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-config-data\") pod \"placement-db-sync-cb9l4\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.554891 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-ovsdbserver-nb\") pod \"dnsmasq-dns-76fcf4b695-628mp\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.554973 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-ovsdbserver-sb\") pod \"dnsmasq-dns-76fcf4b695-628mp\" (UID: 
\"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.555057 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-config\") pod \"dnsmasq-dns-76fcf4b695-628mp\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.555117 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-dns-swift-storage-0\") pod \"dnsmasq-dns-76fcf4b695-628mp\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.555165 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-dns-svc\") pod \"dnsmasq-dns-76fcf4b695-628mp\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.555182 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-scripts\") pod \"placement-db-sync-cb9l4\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.556609 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-ovsdbserver-nb\") pod \"dnsmasq-dns-76fcf4b695-628mp\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.557494 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-ovsdbserver-sb\") pod \"dnsmasq-dns-76fcf4b695-628mp\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.559639 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-dns-swift-storage-0\") pod \"dnsmasq-dns-76fcf4b695-628mp\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.560808 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-config\") pod \"dnsmasq-dns-76fcf4b695-628mp\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.561896 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-dns-svc\") pod \"dnsmasq-dns-76fcf4b695-628mp\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.566404 4881 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4406be5c-0e4c-40ff-ac0f-4da87b36b145-logs\") pod \"placement-db-sync-cb9l4\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.580953 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-scripts\") pod \"placement-db-sync-cb9l4\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.581695 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-combined-ca-bundle\") pod \"placement-db-sync-cb9l4\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.581969 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzxn4\" (UniqueName: \"kubernetes.io/projected/4406be5c-0e4c-40ff-ac0f-4da87b36b145-kube-api-access-qzxn4\") pod \"placement-db-sync-cb9l4\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.586195 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-config-data\") pod \"placement-db-sync-cb9l4\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.589391 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz9lz\" (UniqueName: \"kubernetes.io/projected/4414ed0e-d99f-4f9b-8888-e73cf1d47834-kube-api-access-lz9lz\") pod \"dnsmasq-dns-76fcf4b695-628mp\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.630637 4881 generic.go:334] "Generic (PLEG): container finished" podID="be7726a5-588c-4057-823d-6a6c51348f2c" containerID="4e55705f98615e6841e69340d70998afd92096e009f293d65b120d6e3bfdf3f3" exitCode=0 Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.630712 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c7b6-account-create-sbjdl" event={"ID":"be7726a5-588c-4057-823d-6a6c51348f2c","Type":"ContainerDied","Data":"4e55705f98615e6841e69340d70998afd92096e009f293d65b120d6e3bfdf3f3"} Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.648889 4881 generic.go:334] "Generic (PLEG): container finished" podID="33f9ffca-ca21-41c8-b52e-5a5c7c786f4d" containerID="16b752fc75edd3f6fad9ee6e5c2ea00e27a43acb7a9a7a57685c4e2f9cba1ad1" exitCode=0 Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.648983 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1cc4-account-create-mq9q7" event={"ID":"33f9ffca-ca21-41c8-b52e-5a5c7c786f4d","Type":"ContainerDied","Data":"16b752fc75edd3f6fad9ee6e5c2ea00e27a43acb7a9a7a57685c4e2f9cba1ad1"} Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.669711 4881 generic.go:334] "Generic (PLEG): container finished" podID="2fbc6a44-ebbf-4a77-bad1-53d78b09d292" containerID="1748598d01f3fc7123a05fdfb8c58dbc2731be3bb0dc3a1dff09fd1c0bf642db" 
exitCode=0 Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.670220 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-6956-account-create-xhfnq" event={"ID":"2fbc6a44-ebbf-4a77-bad1-53d78b09d292","Type":"ContainerDied","Data":"1748598d01f3fc7123a05fdfb8c58dbc2731be3bb0dc3a1dff09fd1c0bf642db"} Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.703687 4881 generic.go:334] "Generic (PLEG): container finished" podID="11fc6ffa-3c79-4409-b646-1e658f40120e" containerID="d7f250f05ad02b6d4c03f2f429d3e2ae61a035770d5d337eef1e949528c52c73" exitCode=0 Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.704039 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-95cf-account-create-9q62w" event={"ID":"11fc6ffa-3c79-4409-b646-1e658f40120e","Type":"ContainerDied","Data":"d7f250f05ad02b6d4c03f2f429d3e2ae61a035770d5d337eef1e949528c52c73"} Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.742736 4881 generic.go:334] "Generic (PLEG): container finished" podID="0f3dbda2-e652-483b-9ce4-d3bf05516f7b" containerID="ea4a69f217c879e0f90e6d0019162ddd7630229a61961e2190e17070080738fa" exitCode=0 Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.743741 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-dd28-account-create-v5xvr" event={"ID":"0f3dbda2-e652-483b-9ce4-d3bf05516f7b","Type":"ContainerDied","Data":"ea4a69f217c879e0f90e6d0019162ddd7630229a61961e2190e17070080738fa"} Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.797089 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.838492 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:12 crc kubenswrapper[4881]: I1211 08:37:12.956210 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-5s4v5"] Dec 11 08:37:13 crc kubenswrapper[4881]: I1211 08:37:13.026115 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1206112a-8438-4dcf-9cad-a3e38790a344" path="/var/lib/kubelet/pods/1206112a-8438-4dcf-9cad-a3e38790a344/volumes" Dec 11 08:37:13 crc kubenswrapper[4881]: I1211 08:37:13.027610 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="affe9e76-f06d-45d9-93f1-7f00db52c82d" path="/var/lib/kubelet/pods/affe9e76-f06d-45d9-93f1-7f00db52c82d/volumes" Dec 11 08:37:13 crc kubenswrapper[4881]: I1211 08:37:13.249215 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 11 08:37:13 crc kubenswrapper[4881]: I1211 08:37:13.288252 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-vrsnd"] Dec 11 08:37:13 crc kubenswrapper[4881]: W1211 08:37:13.532957 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4414ed0e_d99f_4f9b_8888_e73cf1d47834.slice/crio-fa2a4b103870d6c0d0cfc1f5c6936516d910176698a6ac649a93235deaeace76 WatchSource:0}: Error finding container fa2a4b103870d6c0d0cfc1f5c6936516d910176698a6ac649a93235deaeace76: Status 404 returned error can't find the container with id fa2a4b103870d6c0d0cfc1f5c6936516d910176698a6ac649a93235deaeace76 Dec 11 08:37:13 crc kubenswrapper[4881]: I1211 08:37:13.533220 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-628mp"] Dec 11 08:37:13 crc 
kubenswrapper[4881]: W1211 08:37:13.581376 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4406be5c_0e4c_40ff_ac0f_4da87b36b145.slice/crio-f47189dad2695138b96b3e9508efece25d6ff0bed68200235fd23c428b85a229 WatchSource:0}: Error finding container f47189dad2695138b96b3e9508efece25d6ff0bed68200235fd23c428b85a229: Status 404 returned error can't find the container with id f47189dad2695138b96b3e9508efece25d6ff0bed68200235fd23c428b85a229 Dec 11 08:37:13 crc kubenswrapper[4881]: I1211 08:37:13.581592 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-cb9l4"] Dec 11 08:37:13 crc kubenswrapper[4881]: I1211 08:37:13.761655 4881 generic.go:334] "Generic (PLEG): container finished" podID="aa55caf4-597c-4e33-8c51-b14753998ad2" containerID="89784fb5031ebc5aec662ee35499797eb1c2948709411984da181134a894bbe2" exitCode=0 Dec 11 08:37:13 crc kubenswrapper[4881]: I1211 08:37:13.761789 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" event={"ID":"aa55caf4-597c-4e33-8c51-b14753998ad2","Type":"ContainerDied","Data":"89784fb5031ebc5aec662ee35499797eb1c2948709411984da181134a894bbe2"} Dec 11 08:37:13 crc kubenswrapper[4881]: I1211 08:37:13.761825 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" event={"ID":"aa55caf4-597c-4e33-8c51-b14753998ad2","Type":"ContainerStarted","Data":"1cc40edb1229f74313521cc815994982757bf102c7157a99604e9617cbcc667e"} Dec 11 08:37:13 crc kubenswrapper[4881]: I1211 08:37:13.763729 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b515a685-da3e-4d92-a8e5-60561e9de83f","Type":"ContainerStarted","Data":"0b636c62c9e655afe0a5beacf8856f9d053b54205e93d17fa1aafcab9c76db6f"} Dec 11 08:37:13 crc kubenswrapper[4881]: I1211 08:37:13.765493 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-628mp" event={"ID":"4414ed0e-d99f-4f9b-8888-e73cf1d47834","Type":"ContainerStarted","Data":"fa2a4b103870d6c0d0cfc1f5c6936516d910176698a6ac649a93235deaeace76"} Dec 11 08:37:13 crc kubenswrapper[4881]: I1211 08:37:13.767189 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-vrsnd" event={"ID":"09761130-7785-4617-b078-ea94c099676a","Type":"ContainerStarted","Data":"536e1cf2200e922961c97ce57ed7f2e215e8235c81203aa0de42adb0f7f32950"} Dec 11 08:37:13 crc kubenswrapper[4881]: I1211 08:37:13.768921 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-cb9l4" event={"ID":"4406be5c-0e4c-40ff-ac0f-4da87b36b145","Type":"ContainerStarted","Data":"f47189dad2695138b96b3e9508efece25d6ff0bed68200235fd23c428b85a229"} Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.176218 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-c7b6-account-create-sbjdl" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.324411 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gs6tc\" (UniqueName: \"kubernetes.io/projected/be7726a5-588c-4057-823d-6a6c51348f2c-kube-api-access-gs6tc\") pod \"be7726a5-588c-4057-823d-6a6c51348f2c\" (UID: \"be7726a5-588c-4057-823d-6a6c51348f2c\") " Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.358623 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be7726a5-588c-4057-823d-6a6c51348f2c-kube-api-access-gs6tc" (OuterVolumeSpecName: "kube-api-access-gs6tc") pod "be7726a5-588c-4057-823d-6a6c51348f2c" (UID: "be7726a5-588c-4057-823d-6a6c51348f2c"). InnerVolumeSpecName "kube-api-access-gs6tc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.427287 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gs6tc\" (UniqueName: \"kubernetes.io/projected/be7726a5-588c-4057-823d-6a6c51348f2c-kube-api-access-gs6tc\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.691741 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.720842 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-6956-account-create-xhfnq" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.738633 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-95cf-account-create-9q62w" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.742922 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-dd28-account-create-v5xvr" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.768081 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1cc4-account-create-mq9q7" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.809324 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-95cf-account-create-9q62w" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.809321 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-95cf-account-create-9q62w" event={"ID":"11fc6ffa-3c79-4409-b646-1e658f40120e","Type":"ContainerDied","Data":"ac03fcf64e576168832b8c29f176fb1176da71a040d1c13da31cdb206e6e824e"} Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.809459 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ac03fcf64e576168832b8c29f176fb1176da71a040d1c13da31cdb206e6e824e" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.817120 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-dd28-account-create-v5xvr" event={"ID":"0f3dbda2-e652-483b-9ce4-d3bf05516f7b","Type":"ContainerDied","Data":"5e91098bef8cf1d3c128d1060cb453a084f991c9b0c59b463c24f2e85f939f2a"} Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.817165 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5e91098bef8cf1d3c128d1060cb453a084f991c9b0c59b463c24f2e85f939f2a" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.817242 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-dd28-account-create-v5xvr" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.821393 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c7b6-account-create-sbjdl" event={"ID":"be7726a5-588c-4057-823d-6a6c51348f2c","Type":"ContainerDied","Data":"41005fc0b83645ae9deb71ad67e02654eb753c9df2de4d563644f8fc35005384"} Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.821568 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-c7b6-account-create-sbjdl" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.821577 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="41005fc0b83645ae9deb71ad67e02654eb753c9df2de4d563644f8fc35005384" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.832518 4881 generic.go:334] "Generic (PLEG): container finished" podID="4414ed0e-d99f-4f9b-8888-e73cf1d47834" containerID="4b0cc2df4e12ea55c5671984d70098bf238ee2595e52235b5ba6c4f67ec1cbbc" exitCode=0 Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.832578 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-628mp" event={"ID":"4414ed0e-d99f-4f9b-8888-e73cf1d47834","Type":"ContainerDied","Data":"4b0cc2df4e12ea55c5671984d70098bf238ee2595e52235b5ba6c4f67ec1cbbc"} Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.836082 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-dns-svc\") pod \"aa55caf4-597c-4e33-8c51-b14753998ad2\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.836249 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wdbsq\" (UniqueName: \"kubernetes.io/projected/11fc6ffa-3c79-4409-b646-1e658f40120e-kube-api-access-wdbsq\") pod \"11fc6ffa-3c79-4409-b646-1e658f40120e\" (UID: \"11fc6ffa-3c79-4409-b646-1e658f40120e\") " Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.836360 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-ovsdbserver-sb\") pod \"aa55caf4-597c-4e33-8c51-b14753998ad2\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.836510 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4bsr\" (UniqueName: \"kubernetes.io/projected/2fbc6a44-ebbf-4a77-bad1-53d78b09d292-kube-api-access-q4bsr\") pod \"2fbc6a44-ebbf-4a77-bad1-53d78b09d292\" (UID: \"2fbc6a44-ebbf-4a77-bad1-53d78b09d292\") " Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.836552 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-config\") pod \"aa55caf4-597c-4e33-8c51-b14753998ad2\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.836775 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n45bx\" (UniqueName: \"kubernetes.io/projected/33f9ffca-ca21-41c8-b52e-5a5c7c786f4d-kube-api-access-n45bx\") pod \"33f9ffca-ca21-41c8-b52e-5a5c7c786f4d\" (UID: \"33f9ffca-ca21-41c8-b52e-5a5c7c786f4d\") " Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.836851 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmgbw\" (UniqueName: \"kubernetes.io/projected/aa55caf4-597c-4e33-8c51-b14753998ad2-kube-api-access-rmgbw\") pod \"aa55caf4-597c-4e33-8c51-b14753998ad2\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.836939 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-dns-swift-storage-0\") pod \"aa55caf4-597c-4e33-8c51-b14753998ad2\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.836990 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-ovsdbserver-nb\") pod \"aa55caf4-597c-4e33-8c51-b14753998ad2\" (UID: \"aa55caf4-597c-4e33-8c51-b14753998ad2\") " Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.837135 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-924sz\" (UniqueName: \"kubernetes.io/projected/0f3dbda2-e652-483b-9ce4-d3bf05516f7b-kube-api-access-924sz\") pod \"0f3dbda2-e652-483b-9ce4-d3bf05516f7b\" (UID: \"0f3dbda2-e652-483b-9ce4-d3bf05516f7b\") " Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.842021 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1cc4-account-create-mq9q7" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.842303 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1cc4-account-create-mq9q7" event={"ID":"33f9ffca-ca21-41c8-b52e-5a5c7c786f4d","Type":"ContainerDied","Data":"1ca8b2f5270ae106d25b30e57f293ddccf6e98ca38c918d291884a2d4fb7c4c6"} Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.842347 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ca8b2f5270ae106d25b30e57f293ddccf6e98ca38c918d291884a2d4fb7c4c6" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.845710 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-vrsnd" event={"ID":"09761130-7785-4617-b078-ea94c099676a","Type":"ContainerStarted","Data":"d7d9878db78b7c7b900f17e224a18ac7b574b29f36904ba9edfbf03574b307d5"} Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.846718 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fbc6a44-ebbf-4a77-bad1-53d78b09d292-kube-api-access-q4bsr" (OuterVolumeSpecName: "kube-api-access-q4bsr") pod "2fbc6a44-ebbf-4a77-bad1-53d78b09d292" (UID: "2fbc6a44-ebbf-4a77-bad1-53d78b09d292"). InnerVolumeSpecName "kube-api-access-q4bsr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.850233 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-6956-account-create-xhfnq" event={"ID":"2fbc6a44-ebbf-4a77-bad1-53d78b09d292","Type":"ContainerDied","Data":"2fd6fe0ba9894f8b4599a604b924dff425f239e57e64f74662a5db9187232103"} Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.850272 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2fd6fe0ba9894f8b4599a604b924dff425f239e57e64f74662a5db9187232103" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.850379 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-6956-account-create-xhfnq" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.854171 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" event={"ID":"aa55caf4-597c-4e33-8c51-b14753998ad2","Type":"ContainerDied","Data":"1cc40edb1229f74313521cc815994982757bf102c7157a99604e9617cbcc667e"} Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.854234 4881 scope.go:117] "RemoveContainer" containerID="89784fb5031ebc5aec662ee35499797eb1c2948709411984da181134a894bbe2" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.854398 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-5s4v5" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.882363 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33f9ffca-ca21-41c8-b52e-5a5c7c786f4d-kube-api-access-n45bx" (OuterVolumeSpecName: "kube-api-access-n45bx") pod "33f9ffca-ca21-41c8-b52e-5a5c7c786f4d" (UID: "33f9ffca-ca21-41c8-b52e-5a5c7c786f4d"). InnerVolumeSpecName "kube-api-access-n45bx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.889584 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa55caf4-597c-4e33-8c51-b14753998ad2-kube-api-access-rmgbw" (OuterVolumeSpecName: "kube-api-access-rmgbw") pod "aa55caf4-597c-4e33-8c51-b14753998ad2" (UID: "aa55caf4-597c-4e33-8c51-b14753998ad2"). InnerVolumeSpecName "kube-api-access-rmgbw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.889772 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f3dbda2-e652-483b-9ce4-d3bf05516f7b-kube-api-access-924sz" (OuterVolumeSpecName: "kube-api-access-924sz") pod "0f3dbda2-e652-483b-9ce4-d3bf05516f7b" (UID: "0f3dbda2-e652-483b-9ce4-d3bf05516f7b"). InnerVolumeSpecName "kube-api-access-924sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.893594 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11fc6ffa-3c79-4409-b646-1e658f40120e-kube-api-access-wdbsq" (OuterVolumeSpecName: "kube-api-access-wdbsq") pod "11fc6ffa-3c79-4409-b646-1e658f40120e" (UID: "11fc6ffa-3c79-4409-b646-1e658f40120e"). InnerVolumeSpecName "kube-api-access-wdbsq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.899223 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-vrsnd" podStartSLOduration=3.899204617 podStartE2EDuration="3.899204617s" podCreationTimestamp="2025-12-11 08:37:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:37:14.892505873 +0000 UTC m=+1283.269874570" watchObservedRunningTime="2025-12-11 08:37:14.899204617 +0000 UTC m=+1283.276573314" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.935739 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "aa55caf4-597c-4e33-8c51-b14753998ad2" (UID: "aa55caf4-597c-4e33-8c51-b14753998ad2"). 
InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.940878 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "aa55caf4-597c-4e33-8c51-b14753998ad2" (UID: "aa55caf4-597c-4e33-8c51-b14753998ad2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.944310 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n45bx\" (UniqueName: \"kubernetes.io/projected/33f9ffca-ca21-41c8-b52e-5a5c7c786f4d-kube-api-access-n45bx\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.944364 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmgbw\" (UniqueName: \"kubernetes.io/projected/aa55caf4-597c-4e33-8c51-b14753998ad2-kube-api-access-rmgbw\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.944375 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.944387 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-924sz\" (UniqueName: \"kubernetes.io/projected/0f3dbda2-e652-483b-9ce4-d3bf05516f7b-kube-api-access-924sz\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.944395 4881 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.944405 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wdbsq\" (UniqueName: \"kubernetes.io/projected/11fc6ffa-3c79-4409-b646-1e658f40120e-kube-api-access-wdbsq\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.944415 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4bsr\" (UniqueName: \"kubernetes.io/projected/2fbc6a44-ebbf-4a77-bad1-53d78b09d292-kube-api-access-q4bsr\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.947108 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-config" (OuterVolumeSpecName: "config") pod "aa55caf4-597c-4e33-8c51-b14753998ad2" (UID: "aa55caf4-597c-4e33-8c51-b14753998ad2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.948140 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "aa55caf4-597c-4e33-8c51-b14753998ad2" (UID: "aa55caf4-597c-4e33-8c51-b14753998ad2"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:37:14 crc kubenswrapper[4881]: I1211 08:37:14.957672 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "aa55caf4-597c-4e33-8c51-b14753998ad2" (UID: "aa55caf4-597c-4e33-8c51-b14753998ad2"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:37:15 crc kubenswrapper[4881]: I1211 08:37:15.046949 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:15 crc kubenswrapper[4881]: I1211 08:37:15.046981 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:15 crc kubenswrapper[4881]: I1211 08:37:15.046994 4881 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aa55caf4-597c-4e33-8c51-b14753998ad2-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:15 crc kubenswrapper[4881]: I1211 08:37:15.201003 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-5s4v5"] Dec 11 08:37:15 crc kubenswrapper[4881]: I1211 08:37:15.207467 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-5s4v5"] Dec 11 08:37:15 crc kubenswrapper[4881]: I1211 08:37:15.871782 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-628mp" event={"ID":"4414ed0e-d99f-4f9b-8888-e73cf1d47834","Type":"ContainerStarted","Data":"0235f7d2b1778dfb438c3a15c2da77b5e7352d840bed0db998a72cb71adb59ac"} Dec 11 08:37:15 crc kubenswrapper[4881]: I1211 08:37:15.872820 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:37:15 crc kubenswrapper[4881]: I1211 08:37:15.914672 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-76fcf4b695-628mp" podStartSLOduration=3.914651138 podStartE2EDuration="3.914651138s" podCreationTimestamp="2025-12-11 08:37:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:37:15.887971504 +0000 UTC m=+1284.265340211" watchObservedRunningTime="2025-12-11 08:37:15.914651138 +0000 UTC m=+1284.292019835" Dec 11 08:37:16 crc kubenswrapper[4881]: I1211 08:37:16.897003 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b515a685-da3e-4d92-a8e5-60561e9de83f","Type":"ContainerStarted","Data":"3819c06d346a851494a0f572fc73842482075ee7932c7739229c872cd48f1d46"} Dec 11 08:37:17 crc kubenswrapper[4881]: I1211 08:37:17.018804 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa55caf4-597c-4e33-8c51-b14753998ad2" path="/var/lib/kubelet/pods/aa55caf4-597c-4e33-8c51-b14753998ad2/volumes" Dec 11 08:37:17 crc kubenswrapper[4881]: I1211 08:37:17.918739 4881 generic.go:334] "Generic (PLEG): container finished" podID="09761130-7785-4617-b078-ea94c099676a" containerID="d7d9878db78b7c7b900f17e224a18ac7b574b29f36904ba9edfbf03574b307d5" exitCode=0 Dec 11 08:37:17 crc 
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.327735 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-vmjdf"]
Dec 11 08:37:18 crc kubenswrapper[4881]: E1211 08:37:18.328580 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fbc6a44-ebbf-4a77-bad1-53d78b09d292" containerName="mariadb-account-create"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.328606 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fbc6a44-ebbf-4a77-bad1-53d78b09d292" containerName="mariadb-account-create"
Dec 11 08:37:18 crc kubenswrapper[4881]: E1211 08:37:18.328635 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f3dbda2-e652-483b-9ce4-d3bf05516f7b" containerName="mariadb-account-create"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.328646 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f3dbda2-e652-483b-9ce4-d3bf05516f7b" containerName="mariadb-account-create"
Dec 11 08:37:18 crc kubenswrapper[4881]: E1211 08:37:18.328677 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33f9ffca-ca21-41c8-b52e-5a5c7c786f4d" containerName="mariadb-account-create"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.328685 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="33f9ffca-ca21-41c8-b52e-5a5c7c786f4d" containerName="mariadb-account-create"
Dec 11 08:37:18 crc kubenswrapper[4881]: E1211 08:37:18.328706 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be7726a5-588c-4057-823d-6a6c51348f2c" containerName="mariadb-account-create"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.328714 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="be7726a5-588c-4057-823d-6a6c51348f2c" containerName="mariadb-account-create"
Dec 11 08:37:18 crc kubenswrapper[4881]: E1211 08:37:18.328727 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11fc6ffa-3c79-4409-b646-1e658f40120e" containerName="mariadb-account-create"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.328735 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="11fc6ffa-3c79-4409-b646-1e658f40120e" containerName="mariadb-account-create"
Dec 11 08:37:18 crc kubenswrapper[4881]: E1211 08:37:18.328749 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa55caf4-597c-4e33-8c51-b14753998ad2" containerName="init"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.328756 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa55caf4-597c-4e33-8c51-b14753998ad2" containerName="init"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.329038 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="11fc6ffa-3c79-4409-b646-1e658f40120e" containerName="mariadb-account-create"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.329071 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fbc6a44-ebbf-4a77-bad1-53d78b09d292" containerName="mariadb-account-create"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.329084 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f3dbda2-e652-483b-9ce4-d3bf05516f7b" containerName="mariadb-account-create"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.329100 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa55caf4-597c-4e33-8c51-b14753998ad2" containerName="init"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.329110 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="be7726a5-588c-4057-823d-6a6c51348f2c" containerName="mariadb-account-create"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.329123 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="33f9ffca-ca21-41c8-b52e-5a5c7c786f4d" containerName="mariadb-account-create"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.330022 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-vmjdf"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.333054 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.333105 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-ggs6l"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.351760 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-vmjdf"]
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.427539 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpbqz\" (UniqueName: \"kubernetes.io/projected/422faa6a-f2ed-4015-87cd-7878bac246e4-kube-api-access-dpbqz\") pod \"barbican-db-sync-vmjdf\" (UID: \"422faa6a-f2ed-4015-87cd-7878bac246e4\") " pod="openstack/barbican-db-sync-vmjdf"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.435102 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/422faa6a-f2ed-4015-87cd-7878bac246e4-db-sync-config-data\") pod \"barbican-db-sync-vmjdf\" (UID: \"422faa6a-f2ed-4015-87cd-7878bac246e4\") " pod="openstack/barbican-db-sync-vmjdf"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.435257 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/422faa6a-f2ed-4015-87cd-7878bac246e4-combined-ca-bundle\") pod \"barbican-db-sync-vmjdf\" (UID: \"422faa6a-f2ed-4015-87cd-7878bac246e4\") " pod="openstack/barbican-db-sync-vmjdf"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.500450 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-bcjrt"]
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.501879 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-bcjrt"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.504653 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.505246 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.506804 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-q8l6s"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.516797 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-bcjrt"]
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.543743 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/422faa6a-f2ed-4015-87cd-7878bac246e4-db-sync-config-data\") pod \"barbican-db-sync-vmjdf\" (UID: \"422faa6a-f2ed-4015-87cd-7878bac246e4\") " pod="openstack/barbican-db-sync-vmjdf"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.543838 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/422faa6a-f2ed-4015-87cd-7878bac246e4-combined-ca-bundle\") pod \"barbican-db-sync-vmjdf\" (UID: \"422faa6a-f2ed-4015-87cd-7878bac246e4\") " pod="openstack/barbican-db-sync-vmjdf"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.543887 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpbqz\" (UniqueName: \"kubernetes.io/projected/422faa6a-f2ed-4015-87cd-7878bac246e4-kube-api-access-dpbqz\") pod \"barbican-db-sync-vmjdf\" (UID: \"422faa6a-f2ed-4015-87cd-7878bac246e4\") " pod="openstack/barbican-db-sync-vmjdf"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.550742 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/422faa6a-f2ed-4015-87cd-7878bac246e4-db-sync-config-data\") pod \"barbican-db-sync-vmjdf\" (UID: \"422faa6a-f2ed-4015-87cd-7878bac246e4\") " pod="openstack/barbican-db-sync-vmjdf"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.565352 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/422faa6a-f2ed-4015-87cd-7878bac246e4-combined-ca-bundle\") pod \"barbican-db-sync-vmjdf\" (UID: \"422faa6a-f2ed-4015-87cd-7878bac246e4\") " pod="openstack/barbican-db-sync-vmjdf"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.584307 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpbqz\" (UniqueName: \"kubernetes.io/projected/422faa6a-f2ed-4015-87cd-7878bac246e4-kube-api-access-dpbqz\") pod \"barbican-db-sync-vmjdf\" (UID: \"422faa6a-f2ed-4015-87cd-7878bac246e4\") " pod="openstack/barbican-db-sync-vmjdf"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.646733 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7s5v\" (UniqueName: \"kubernetes.io/projected/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-kube-api-access-c7s5v\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.647141 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-config-data\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.647206 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-db-sync-config-data\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.647293 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-scripts\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.647447 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-etc-machine-id\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.647533 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-combined-ca-bundle\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.664066 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-vmjdf"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.730471 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-jvkz7"]
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.732307 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-jvkz7"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.741179 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-gpnxr"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.741772 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.756652 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-etc-machine-id\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.756741 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-etc-machine-id\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.756861 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-combined-ca-bundle\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.756951 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7s5v\" (UniqueName: \"kubernetes.io/projected/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-kube-api-access-c7s5v\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.757085 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-config-data\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.757169 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-db-sync-config-data\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.757311 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-scripts\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.757775 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-jvkz7"]
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.763064 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-scripts\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt"
Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.763130 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-combined-ca-bundle\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt"
08:37:18.763130 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-combined-ca-bundle\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt" Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.763642 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-db-sync-config-data\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt" Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.780114 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-config-data\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt" Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.781844 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7s5v\" (UniqueName: \"kubernetes.io/projected/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-kube-api-access-c7s5v\") pod \"cinder-db-sync-bcjrt\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " pod="openstack/cinder-db-sync-bcjrt" Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.817058 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-bcjrt" Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.858974 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dde239c-3502-4b29-8f5d-1893f53819bd-combined-ca-bundle\") pod \"heat-db-sync-jvkz7\" (UID: \"2dde239c-3502-4b29-8f5d-1893f53819bd\") " pod="openstack/heat-db-sync-jvkz7" Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.859092 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dde239c-3502-4b29-8f5d-1893f53819bd-config-data\") pod \"heat-db-sync-jvkz7\" (UID: \"2dde239c-3502-4b29-8f5d-1893f53819bd\") " pod="openstack/heat-db-sync-jvkz7" Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.859191 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd2dc\" (UniqueName: \"kubernetes.io/projected/2dde239c-3502-4b29-8f5d-1893f53819bd-kube-api-access-qd2dc\") pod \"heat-db-sync-jvkz7\" (UID: \"2dde239c-3502-4b29-8f5d-1893f53819bd\") " pod="openstack/heat-db-sync-jvkz7" Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.961853 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dde239c-3502-4b29-8f5d-1893f53819bd-config-data\") pod \"heat-db-sync-jvkz7\" (UID: \"2dde239c-3502-4b29-8f5d-1893f53819bd\") " pod="openstack/heat-db-sync-jvkz7" Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.962019 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd2dc\" (UniqueName: \"kubernetes.io/projected/2dde239c-3502-4b29-8f5d-1893f53819bd-kube-api-access-qd2dc\") pod \"heat-db-sync-jvkz7\" (UID: \"2dde239c-3502-4b29-8f5d-1893f53819bd\") " pod="openstack/heat-db-sync-jvkz7" Dec 11 08:37:18 crc 
kubenswrapper[4881]: I1211 08:37:18.962065 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dde239c-3502-4b29-8f5d-1893f53819bd-combined-ca-bundle\") pod \"heat-db-sync-jvkz7\" (UID: \"2dde239c-3502-4b29-8f5d-1893f53819bd\") " pod="openstack/heat-db-sync-jvkz7" Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.963448 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-cb9l4" event={"ID":"4406be5c-0e4c-40ff-ac0f-4da87b36b145","Type":"ContainerStarted","Data":"f2fda50fe0522e806ea7b78ac99013d0af1df7ba3435818f78881969a91abf57"} Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.969230 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dde239c-3502-4b29-8f5d-1893f53819bd-config-data\") pod \"heat-db-sync-jvkz7\" (UID: \"2dde239c-3502-4b29-8f5d-1893f53819bd\") " pod="openstack/heat-db-sync-jvkz7" Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.972907 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dde239c-3502-4b29-8f5d-1893f53819bd-combined-ca-bundle\") pod \"heat-db-sync-jvkz7\" (UID: \"2dde239c-3502-4b29-8f5d-1893f53819bd\") " pod="openstack/heat-db-sync-jvkz7" Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.993605 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-cb9l4" podStartSLOduration=2.839450893 podStartE2EDuration="6.993586637s" podCreationTimestamp="2025-12-11 08:37:12 +0000 UTC" firstStartedPulling="2025-12-11 08:37:13.582686799 +0000 UTC m=+1281.960055496" lastFinishedPulling="2025-12-11 08:37:17.736822533 +0000 UTC m=+1286.114191240" observedRunningTime="2025-12-11 08:37:18.993552286 +0000 UTC m=+1287.370920993" watchObservedRunningTime="2025-12-11 08:37:18.993586637 +0000 UTC m=+1287.370955334" Dec 11 08:37:18 crc kubenswrapper[4881]: I1211 08:37:18.994032 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd2dc\" (UniqueName: \"kubernetes.io/projected/2dde239c-3502-4b29-8f5d-1893f53819bd-kube-api-access-qd2dc\") pod \"heat-db-sync-jvkz7\" (UID: \"2dde239c-3502-4b29-8f5d-1893f53819bd\") " pod="openstack/heat-db-sync-jvkz7" Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.056842 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.058363 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.058457 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.064893 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.098055 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-rhqlf"] Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.099584 4881 util.go:30] "No sandbox for pod can be found. 
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.099584 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rhqlf"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.124088 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.124238 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.124312 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-xndx6"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.137864 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-rhqlf"]
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.165698 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-jvkz7"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.168718 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c45356cd-f35d-41fa-98d3-6697e7a8100a-config\") pod \"neutron-db-sync-rhqlf\" (UID: \"c45356cd-f35d-41fa-98d3-6697e7a8100a\") " pod="openstack/neutron-db-sync-rhqlf"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.168800 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dca57af-8220-449b-a5fc-8001bcc8c180-config-data\") pod \"mysqld-exporter-0\" (UID: \"3dca57af-8220-449b-a5fc-8001bcc8c180\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.168829 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dca57af-8220-449b-a5fc-8001bcc8c180-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"3dca57af-8220-449b-a5fc-8001bcc8c180\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.168862 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24zj7\" (UniqueName: \"kubernetes.io/projected/c45356cd-f35d-41fa-98d3-6697e7a8100a-kube-api-access-24zj7\") pod \"neutron-db-sync-rhqlf\" (UID: \"c45356cd-f35d-41fa-98d3-6697e7a8100a\") " pod="openstack/neutron-db-sync-rhqlf"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.168923 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqsg2\" (UniqueName: \"kubernetes.io/projected/3dca57af-8220-449b-a5fc-8001bcc8c180-kube-api-access-mqsg2\") pod \"mysqld-exporter-0\" (UID: \"3dca57af-8220-449b-a5fc-8001bcc8c180\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.168942 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c45356cd-f35d-41fa-98d3-6697e7a8100a-combined-ca-bundle\") pod \"neutron-db-sync-rhqlf\" (UID: \"c45356cd-f35d-41fa-98d3-6697e7a8100a\") " pod="openstack/neutron-db-sync-rhqlf"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.247061 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-vmjdf"]
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.271458 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c45356cd-f35d-41fa-98d3-6697e7a8100a-combined-ca-bundle\") pod \"neutron-db-sync-rhqlf\" (UID: \"c45356cd-f35d-41fa-98d3-6697e7a8100a\") " pod="openstack/neutron-db-sync-rhqlf"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.271511 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqsg2\" (UniqueName: \"kubernetes.io/projected/3dca57af-8220-449b-a5fc-8001bcc8c180-kube-api-access-mqsg2\") pod \"mysqld-exporter-0\" (UID: \"3dca57af-8220-449b-a5fc-8001bcc8c180\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.271674 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c45356cd-f35d-41fa-98d3-6697e7a8100a-config\") pod \"neutron-db-sync-rhqlf\" (UID: \"c45356cd-f35d-41fa-98d3-6697e7a8100a\") " pod="openstack/neutron-db-sync-rhqlf"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.271754 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dca57af-8220-449b-a5fc-8001bcc8c180-config-data\") pod \"mysqld-exporter-0\" (UID: \"3dca57af-8220-449b-a5fc-8001bcc8c180\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.271791 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dca57af-8220-449b-a5fc-8001bcc8c180-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"3dca57af-8220-449b-a5fc-8001bcc8c180\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.271827 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24zj7\" (UniqueName: \"kubernetes.io/projected/c45356cd-f35d-41fa-98d3-6697e7a8100a-kube-api-access-24zj7\") pod \"neutron-db-sync-rhqlf\" (UID: \"c45356cd-f35d-41fa-98d3-6697e7a8100a\") " pod="openstack/neutron-db-sync-rhqlf"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.277850 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c45356cd-f35d-41fa-98d3-6697e7a8100a-combined-ca-bundle\") pod \"neutron-db-sync-rhqlf\" (UID: \"c45356cd-f35d-41fa-98d3-6697e7a8100a\") " pod="openstack/neutron-db-sync-rhqlf"
Dec 11 08:37:19 crc kubenswrapper[4881]: W1211 08:37:19.278919 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod422faa6a_f2ed_4015_87cd_7878bac246e4.slice/crio-4dff55711a9ea928a36047afb2d3ccf0107f427a994b77b9be3b15a05784469e WatchSource:0}: Error finding container 4dff55711a9ea928a36047afb2d3ccf0107f427a994b77b9be3b15a05784469e: Status 404 returned error can't find the container with id 4dff55711a9ea928a36047afb2d3ccf0107f427a994b77b9be3b15a05784469e
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.284955 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dca57af-8220-449b-a5fc-8001bcc8c180-config-data\") pod \"mysqld-exporter-0\" (UID: \"3dca57af-8220-449b-a5fc-8001bcc8c180\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.293536 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24zj7\" (UniqueName: \"kubernetes.io/projected/c45356cd-f35d-41fa-98d3-6697e7a8100a-kube-api-access-24zj7\") pod \"neutron-db-sync-rhqlf\" (UID: \"c45356cd-f35d-41fa-98d3-6697e7a8100a\") " pod="openstack/neutron-db-sync-rhqlf"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.294648 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dca57af-8220-449b-a5fc-8001bcc8c180-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"3dca57af-8220-449b-a5fc-8001bcc8c180\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.295028 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqsg2\" (UniqueName: \"kubernetes.io/projected/3dca57af-8220-449b-a5fc-8001bcc8c180-kube-api-access-mqsg2\") pod \"mysqld-exporter-0\" (UID: \"3dca57af-8220-449b-a5fc-8001bcc8c180\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.295068 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/c45356cd-f35d-41fa-98d3-6697e7a8100a-config\") pod \"neutron-db-sync-rhqlf\" (UID: \"c45356cd-f35d-41fa-98d3-6697e7a8100a\") " pod="openstack/neutron-db-sync-rhqlf"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.430665 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.448580 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rhqlf"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.473407 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-bcjrt"]
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.567258 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-vrsnd"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.647760 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:37:19 crc kubenswrapper[4881]: E1211 08:37:19.648554 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09761130-7785-4617-b078-ea94c099676a" containerName="keystone-bootstrap"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.648575 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="09761130-7785-4617-b078-ea94c099676a" containerName="keystone-bootstrap"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.648814 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="09761130-7785-4617-b078-ea94c099676a" containerName="keystone-bootstrap"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.651074 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.655079 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.655221 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.687994 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-config-data\") pod \"09761130-7785-4617-b078-ea94c099676a\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") "
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.688109 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-scripts\") pod \"09761130-7785-4617-b078-ea94c099676a\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") "
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.688164 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghrkf\" (UniqueName: \"kubernetes.io/projected/09761130-7785-4617-b078-ea94c099676a-kube-api-access-ghrkf\") pod \"09761130-7785-4617-b078-ea94c099676a\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") "
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.688297 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-fernet-keys\") pod \"09761130-7785-4617-b078-ea94c099676a\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") "
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.688346 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-combined-ca-bundle\") pod \"09761130-7785-4617-b078-ea94c099676a\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") "
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.688373 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-credential-keys\") pod \"09761130-7785-4617-b078-ea94c099676a\" (UID: \"09761130-7785-4617-b078-ea94c099676a\") "
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.689506 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.689609 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-scripts\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.689667 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0074c6f2-5d03-406d-a8a3-19f87e5980d8-log-httpd\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.689773 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0074c6f2-5d03-406d-a8a3-19f87e5980d8-run-httpd\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.689972 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-config-data\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.690031 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsqms\" (UniqueName: \"kubernetes.io/projected/0074c6f2-5d03-406d-a8a3-19f87e5980d8-kube-api-access-nsqms\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.690101 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.694274 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "09761130-7785-4617-b078-ea94c099676a" (UID: "09761130-7785-4617-b078-ea94c099676a"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.701968 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09761130-7785-4617-b078-ea94c099676a-kube-api-access-ghrkf" (OuterVolumeSpecName: "kube-api-access-ghrkf") pod "09761130-7785-4617-b078-ea94c099676a" (UID: "09761130-7785-4617-b078-ea94c099676a"). InnerVolumeSpecName "kube-api-access-ghrkf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.702082 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-scripts" (OuterVolumeSpecName: "scripts") pod "09761130-7785-4617-b078-ea94c099676a" (UID: "09761130-7785-4617-b078-ea94c099676a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.703665 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "09761130-7785-4617-b078-ea94c099676a" (UID: "09761130-7785-4617-b078-ea94c099676a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.722582 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.723523 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-config-data" (OuterVolumeSpecName: "config-data") pod "09761130-7785-4617-b078-ea94c099676a" (UID: "09761130-7785-4617-b078-ea94c099676a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.734842 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "09761130-7785-4617-b078-ea94c099676a" (UID: "09761130-7785-4617-b078-ea94c099676a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:37:19 crc kubenswrapper[4881]: W1211 08:37:19.791318 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2dde239c_3502_4b29_8f5d_1893f53819bd.slice/crio-ae456e6ac12e1365bac8ea21e4bbfb88a0c67b634a3e7710e3758cf72bb0f884 WatchSource:0}: Error finding container ae456e6ac12e1365bac8ea21e4bbfb88a0c67b634a3e7710e3758cf72bb0f884: Status 404 returned error can't find the container with id ae456e6ac12e1365bac8ea21e4bbfb88a0c67b634a3e7710e3758cf72bb0f884
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.791692 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-scripts\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.791748 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0074c6f2-5d03-406d-a8a3-19f87e5980d8-log-httpd\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.791805 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0074c6f2-5d03-406d-a8a3-19f87e5980d8-run-httpd\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.791897 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-config-data\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.791935 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsqms\" (UniqueName: \"kubernetes.io/projected/0074c6f2-5d03-406d-a8a3-19f87e5980d8-kube-api-access-nsqms\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.791976 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.792052 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.792120 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.792136 4881 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-credential-keys\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.792148 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-config-data\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.792159 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-scripts\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.792170 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghrkf\" (UniqueName: \"kubernetes.io/projected/09761130-7785-4617-b078-ea94c099676a-kube-api-access-ghrkf\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.792182 4881 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/09761130-7785-4617-b078-ea94c099676a-fernet-keys\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.792666 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0074c6f2-5d03-406d-a8a3-19f87e5980d8-run-httpd\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.800129 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-scripts\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.800776 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.801280 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0074c6f2-5d03-406d-a8a3-19f87e5980d8-log-httpd\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.814824 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.817539 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsqms\" (UniqueName: \"kubernetes.io/projected/0074c6f2-5d03-406d-a8a3-19f87e5980d8-kube-api-access-nsqms\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.826357 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-config-data\") pod \"ceilometer-0\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " pod="openstack/ceilometer-0"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.830276 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-jvkz7"]
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.982254 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-vmjdf" event={"ID":"422faa6a-f2ed-4015-87cd-7878bac246e4","Type":"ContainerStarted","Data":"4dff55711a9ea928a36047afb2d3ccf0107f427a994b77b9be3b15a05784469e"}
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.985607 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-bcjrt" event={"ID":"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610","Type":"ContainerStarted","Data":"5a7e5ca3250301990b5064d6d2a24967ea49f54786fa1b754c2659ead514f72f"}
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.987745 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-vrsnd"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.987898 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-vrsnd" event={"ID":"09761130-7785-4617-b078-ea94c099676a","Type":"ContainerDied","Data":"536e1cf2200e922961c97ce57ed7f2e215e8235c81203aa0de42adb0f7f32950"}
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.987923 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="536e1cf2200e922961c97ce57ed7f2e215e8235c81203aa0de42adb0f7f32950"
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.990699 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-jvkz7" event={"ID":"2dde239c-3502-4b29-8f5d-1893f53819bd","Type":"ContainerStarted","Data":"ae456e6ac12e1365bac8ea21e4bbfb88a0c67b634a3e7710e3758cf72bb0f884"}
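[editor's note] The keystone-bootstrap-vrsnd teardown above follows the mirror-image path of a mount: reconciler_common.go:159 ("UnmountVolume started"), operation_generator.go:803 ("UnmountVolume.TearDown succeeded"), then reconciler_common.go:293 ("Volume detached"). A sketch for confirming the teardown fully reconciled for one pod UID (Python; unmount_complete is a hypothetical checker, written under the assumption that every volume of the pod appears in both record kinds):

import re

UID = '09761130-7785-4617-b078-ea94c099676a'   # keystone-bootstrap-vrsnd, from the log

def unmount_complete(lines, uid):
    # Volume names appear escaped-quoted (\"...\") in both record kinds.
    started = {re.search(r'UnmountVolume started for volume \\"([^\\]+)\\"', ln).group(1)
               for ln in lines if 'UnmountVolume started' in ln and uid in ln}
    detached = {re.search(r'Volume detached for volume \\"([^\\]+)\\"', ln).group(1)
                for ln in lines if 'Volume detached' in ln and uid in ln}
    return started - detached   # empty set => every unmount reached "detached"

For the six volumes of this pod (config-data, scripts, kube-api-access-ghrkf, fernet-keys, combined-ca-bundle, credential-keys) the difference is empty, i.e. the teardown completed before the replacement bootstrap pod was admitted.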
Dec 11 08:37:19 crc kubenswrapper[4881]: I1211 08:37:19.991514 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.146920 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-vrsnd"]
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.226873 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-vrsnd"]
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.290012 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"]
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.332031 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-p9qkf"]
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.333691 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.337221 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.338215 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.338425 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-9vkph"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.338571 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.342259 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-p9qkf"]
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.353946 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-rhqlf"]
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.421828 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-config-data\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.422095 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-credential-keys\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.422242 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-fernet-keys\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.422395 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-combined-ca-bundle\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.422488 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6r7ld\" (UniqueName: \"kubernetes.io/projected/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-kube-api-access-6r7ld\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.422603 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-scripts\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.524844 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-fernet-keys\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.524950 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-combined-ca-bundle\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.524976 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6r7ld\" (UniqueName: \"kubernetes.io/projected/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-kube-api-access-6r7ld\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.525019 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-scripts\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.525236 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-config-data\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.525270 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-credential-keys\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.529127 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-scripts\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.529879 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-fernet-keys\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.530136 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-config-data\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.545916 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-combined-ca-bundle\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.548201 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6r7ld\" (UniqueName: \"kubernetes.io/projected/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-kube-api-access-6r7ld\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.552500 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-credential-keys\") pod \"keystone-bootstrap-p9qkf\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.660557 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:20 crc kubenswrapper[4881]: I1211 08:37:20.694029 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:37:21 crc kubenswrapper[4881]: I1211 08:37:21.021697 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09761130-7785-4617-b078-ea94c099676a" path="/var/lib/kubelet/pods/09761130-7785-4617-b078-ea94c099676a/volumes"
Dec 11 08:37:21 crc kubenswrapper[4881]: I1211 08:37:21.023315 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rhqlf" event={"ID":"c45356cd-f35d-41fa-98d3-6697e7a8100a","Type":"ContainerStarted","Data":"3845b4845598a018e7d54d3a688631756bdb219853a748ec1617f3155be66256"}
Dec 11 08:37:21 crc kubenswrapper[4881]: I1211 08:37:21.023466 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rhqlf" event={"ID":"c45356cd-f35d-41fa-98d3-6697e7a8100a","Type":"ContainerStarted","Data":"e596a8ff36255df8134f4dd68ca41fb09a0db49062f3d114624d88a1323570d7"}
Dec 11 08:37:21 crc kubenswrapper[4881]: I1211 08:37:21.024389 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"3dca57af-8220-449b-a5fc-8001bcc8c180","Type":"ContainerStarted","Data":"4ccc156b3fed1a049ca01fe859b4b141a69afe251293b4611698658c47764f88"}
Dec 11 08:37:21 crc kubenswrapper[4881]: I1211 08:37:21.026083 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0074c6f2-5d03-406d-a8a3-19f87e5980d8","Type":"ContainerStarted","Data":"d8fdb909bec2b926989722e08fd72d701faa5ebc6cca3a579809322ea84ecce9"}
Dec 11 08:37:21 crc kubenswrapper[4881]: I1211 08:37:21.049666 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-rhqlf" podStartSLOduration=2.049643483 podStartE2EDuration="2.049643483s" podCreationTimestamp="2025-12-11 08:37:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:37:21.039232282 +0000 UTC m=+1289.416600979" watchObservedRunningTime="2025-12-11 08:37:21.049643483 +0000 UTC m=+1289.427012190"
Dec 11 08:37:21 crc kubenswrapper[4881]: I1211 08:37:21.208039 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-p9qkf"]
Dec 11 08:37:21 crc kubenswrapper[4881]: W1211 08:37:21.220599 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5e8aab09_6d6f_4c67_965c_38b8dd3bb7ce.slice/crio-07c511b037163dac794c71d4ee8742054bc8a9376deda13aedf60e35cc46efdb WatchSource:0}: Error finding container 07c511b037163dac794c71d4ee8742054bc8a9376deda13aedf60e35cc46efdb: Status 404 returned error can't find the container with id 07c511b037163dac794c71d4ee8742054bc8a9376deda13aedf60e35cc46efdb
Dec 11 08:37:22 crc kubenswrapper[4881]: I1211 08:37:22.038610 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-p9qkf" event={"ID":"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce","Type":"ContainerStarted","Data":"07c511b037163dac794c71d4ee8742054bc8a9376deda13aedf60e35cc46efdb"}
Dec 11 08:37:22 crc kubenswrapper[4881]: I1211 08:37:22.799974 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-76fcf4b695-628mp"
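[editor's note] The kubelet.go:2453 PLEG records above carry their payload as event={...}, which is itself valid JSON, so per-pod container lifecycles (sandbox created, containers started, containers died) can be reconstructed directly. A minimal sketch (Python; lifecycle is a hypothetical helper assuming the payload stays single-level JSON with ID/Type/Data keys as seen here):

import json
import re

EVENT = re.compile(r'pod="(?P<pod>[^"]+)" event=(?P<blob>\{.*?\})')

def lifecycle(lines):
    # Map pod name -> ordered list of (event type, container/sandbox ID).
    timeline = {}
    for ln in lines:
        m = EVENT.search(ln)
        if not m:
            continue
        ev = json.loads(m.group('blob'))
        timeline.setdefault(m.group('pod'), []).append((ev['Type'], ev['Data']))
    return timeline

Applied to this window it would show, for example, neutron-db-sync-rhqlf gaining two ContainerStarted entries (pod sandbox plus the db-sync container) within the same second.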
Dec 11 08:37:22 crc kubenswrapper[4881]: I1211 08:37:22.865224 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-s775q"]
Dec 11 08:37:22 crc kubenswrapper[4881]: I1211 08:37:22.865581 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-77585f5f8c-s775q" podUID="2793ce53-41a4-4170-998f-b4ddcd0dbcaa" containerName="dnsmasq-dns" containerID="cri-o://88e3d3f95b7e7011258611183d8dc80e67bbafc9ef51ce2a0cfde6734d7ee085" gracePeriod=10
Dec 11 08:37:23 crc kubenswrapper[4881]: I1211 08:37:23.120977 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-p9qkf" event={"ID":"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce","Type":"ContainerStarted","Data":"8a6c34b4eb13c9b973c44a7403af19f6140199990c444843f4f0330be7b4fc30"}
Dec 11 08:37:23 crc kubenswrapper[4881]: I1211 08:37:23.136863 4881 generic.go:334] "Generic (PLEG): container finished" podID="2793ce53-41a4-4170-998f-b4ddcd0dbcaa" containerID="88e3d3f95b7e7011258611183d8dc80e67bbafc9ef51ce2a0cfde6734d7ee085" exitCode=0
Dec 11 08:37:23 crc kubenswrapper[4881]: I1211 08:37:23.136932 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-s775q" event={"ID":"2793ce53-41a4-4170-998f-b4ddcd0dbcaa","Type":"ContainerDied","Data":"88e3d3f95b7e7011258611183d8dc80e67bbafc9ef51ce2a0cfde6734d7ee085"}
Dec 11 08:37:23 crc kubenswrapper[4881]: I1211 08:37:23.140231 4881 generic.go:334] "Generic (PLEG): container finished" podID="4406be5c-0e4c-40ff-ac0f-4da87b36b145" containerID="f2fda50fe0522e806ea7b78ac99013d0af1df7ba3435818f78881969a91abf57" exitCode=0
Dec 11 08:37:23 crc kubenswrapper[4881]: I1211 08:37:23.150573 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-cb9l4" event={"ID":"4406be5c-0e4c-40ff-ac0f-4da87b36b145","Type":"ContainerDied","Data":"f2fda50fe0522e806ea7b78ac99013d0af1df7ba3435818f78881969a91abf57"}
Dec 11 08:37:23 crc kubenswrapper[4881]: I1211 08:37:23.224129 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-p9qkf" podStartSLOduration=3.224109367 podStartE2EDuration="3.224109367s" podCreationTimestamp="2025-12-11 08:37:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:37:23.193792578 +0000 UTC m=+1291.571161275" watchObservedRunningTime="2025-12-11 08:37:23.224109367 +0000 UTC m=+1291.601478064"
Dec 11 08:37:24 crc kubenswrapper[4881]: I1211 08:37:24.159823 4881 generic.go:334] "Generic (PLEG): container finished" podID="b515a685-da3e-4d92-a8e5-60561e9de83f" containerID="3819c06d346a851494a0f572fc73842482075ee7932c7739229c872cd48f1d46" exitCode=0
Dec 11 08:37:24 crc kubenswrapper[4881]: I1211 08:37:24.159910 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b515a685-da3e-4d92-a8e5-60561e9de83f","Type":"ContainerDied","Data":"3819c06d346a851494a0f572fc73842482075ee7932c7739229c872cd48f1d46"}
Dec 11 08:37:25 crc kubenswrapper[4881]: I1211 08:37:25.173767 4881 generic.go:334] "Generic (PLEG): container finished" podID="e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec" containerID="848fe4d3938e93638f3177a44497a800500651736f73ccaf195458f2ef728205" exitCode=0
Dec 11 08:37:25 crc kubenswrapper[4881]: I1211 08:37:25.173870 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lx525" event={"ID":"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec","Type":"ContainerDied","Data":"848fe4d3938e93638f3177a44497a800500651736f73ccaf195458f2ef728205"}
Dec 11 08:37:28 crc kubenswrapper[4881]: I1211 08:37:28.204984 4881 generic.go:334] "Generic (PLEG): container finished" podID="5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce" containerID="8a6c34b4eb13c9b973c44a7403af19f6140199990c444843f4f0330be7b4fc30" exitCode=0
Dec 11 08:37:28 crc kubenswrapper[4881]: I1211 08:37:28.205069 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-p9qkf" event={"ID":"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce","Type":"ContainerDied","Data":"8a6c34b4eb13c9b973c44a7403af19f6140199990c444843f4f0330be7b4fc30"}
Dec 11 08:37:29 crc kubenswrapper[4881]: I1211 08:37:29.470382 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lx525"
Dec 11 08:37:29 crc kubenswrapper[4881]: I1211 08:37:29.573783 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-db-sync-config-data\") pod \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\" (UID: \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\") "
Dec 11 08:37:29 crc kubenswrapper[4881]: I1211 08:37:29.573922 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k4k68\" (UniqueName: \"kubernetes.io/projected/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-kube-api-access-k4k68\") pod \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\" (UID: \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\") "
Dec 11 08:37:29 crc kubenswrapper[4881]: I1211 08:37:29.574002 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-combined-ca-bundle\") pod \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\" (UID: \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\") "
Dec 11 08:37:29 crc kubenswrapper[4881]: I1211 08:37:29.574496 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-config-data\") pod \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\" (UID: \"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec\") "
Dec 11 08:37:29 crc kubenswrapper[4881]: I1211 08:37:29.579980 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec" (UID: "e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:37:29 crc kubenswrapper[4881]: I1211 08:37:29.580390 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-kube-api-access-k4k68" (OuterVolumeSpecName: "kube-api-access-k4k68") pod "e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec" (UID: "e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec"). InnerVolumeSpecName "kube-api-access-k4k68". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:37:29 crc kubenswrapper[4881]: I1211 08:37:29.615300 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec" (UID: "e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:37:29 crc kubenswrapper[4881]: I1211 08:37:29.631031 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-config-data" (OuterVolumeSpecName: "config-data") pod "e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec" (UID: "e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:37:29 crc kubenswrapper[4881]: I1211 08:37:29.677315 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-config-data\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:29 crc kubenswrapper[4881]: I1211 08:37:29.677358 4881 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:29 crc kubenswrapper[4881]: I1211 08:37:29.677371 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k4k68\" (UniqueName: \"kubernetes.io/projected/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-kube-api-access-k4k68\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:29 crc kubenswrapper[4881]: I1211 08:37:29.677379 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:30 crc kubenswrapper[4881]: I1211 08:37:30.261310 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lx525" event={"ID":"e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec","Type":"ContainerDied","Data":"037dea28846af65979d310752ff1396dc248b72cb1422c85838ffc7bac3b7267"}
Dec 11 08:37:30 crc kubenswrapper[4881]: I1211 08:37:30.261382 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="037dea28846af65979d310752ff1396dc248b72cb1422c85838ffc7bac3b7267"
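[editor's note] The generic.go:334 "container finished" records above carry the container's exit status in clear text; every db-sync and dnsmasq container in this window exits 0. A sketch for flagging any nonzero exits in a capture like this one (Python; failed_containers is a hypothetical filter matching the containerID="..."/exitCode= layout seen above):

import re

FINISHED = re.compile(r'containerID="(?P<cid>[0-9a-f]+)" exitCode=(?P<rc>-?\d+)')

def failed_containers(lines):
    # Return (container ID, exit code) for every finished container
    # whose exit code is nonzero; empty list means all jobs succeeded.
    return [(m.group('cid'), int(m.group('rc')))
            for ln in lines if 'container finished' in ln
            for m in [FINISHED.search(ln)] if m and int(m.group('rc')) != 0]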
Dec 11 08:37:30 crc kubenswrapper[4881]: I1211 08:37:30.261458 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lx525"
Dec 11 08:37:30 crc kubenswrapper[4881]: E1211 08:37:30.532952 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode87c3b7f_ebb4_4f97_bf3b_df5869f3c1ec.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode87c3b7f_ebb4_4f97_bf3b_df5869f3c1ec.slice/crio-037dea28846af65979d310752ff1396dc248b72cb1422c85838ffc7bac3b7267\": RecentStats: unable to find data in memory cache]"
Dec 11 08:37:30 crc kubenswrapper[4881]: I1211 08:37:30.925956 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-lwp6f"]
Dec 11 08:37:30 crc kubenswrapper[4881]: E1211 08:37:30.926588 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec" containerName="glance-db-sync"
Dec 11 08:37:30 crc kubenswrapper[4881]: I1211 08:37:30.926612 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec" containerName="glance-db-sync"
Dec 11 08:37:30 crc kubenswrapper[4881]: I1211 08:37:30.926846 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec" containerName="glance-db-sync"
Dec 11 08:37:30 crc kubenswrapper[4881]: I1211 08:37:30.928205 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:30 crc kubenswrapper[4881]: I1211 08:37:30.950171 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-lwp6f"]
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.018802 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25fhz\" (UniqueName: \"kubernetes.io/projected/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-kube-api-access-25fhz\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.018885 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.018950 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-config\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.018993 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.019030 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.019109 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.120624 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25fhz\" (UniqueName: \"kubernetes.io/projected/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-kube-api-access-25fhz\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.120957 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.121060 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-config\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.121166 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.121249 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.121407 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.121661 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.122219 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.122348 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.123216 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-config\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.123562 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.145464 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25fhz\" (UniqueName: \"kubernetes.io/projected/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-kube-api-access-25fhz\") pod \"dnsmasq-dns-8b5c85b87-lwp6f\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") " pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.284225 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.826745 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.828753 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.831259 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.831936 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-w497b"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.832178 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.844731 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.940836 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.940880 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-config-data\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.940989 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.941013 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-scripts\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.941047 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/99628143-5f10-4ebc-beea-4e33458d3eb3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.941088 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99628143-5f10-4ebc-beea-4e33458d3eb3-logs\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0"
Dec 11 08:37:31 crc kubenswrapper[4881]: I1211 08:37:31.941199 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbttd\" (UniqueName: \"kubernetes.io/projected/99628143-5f10-4ebc-beea-4e33458d3eb3-kube-api-access-xbttd\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0"
Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.042770 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0"
Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.043092 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-config-data\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0"
Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.043159 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0"
Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.043193 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-scripts\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0"
Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.043219 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/99628143-5f10-4ebc-beea-4e33458d3eb3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0"
Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.043263 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99628143-5f10-4ebc-beea-4e33458d3eb3-logs\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0"
Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.043387 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbttd\" (UniqueName: \"kubernetes.io/projected/99628143-5f10-4ebc-beea-4e33458d3eb3-kube-api-access-xbttd\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0"
Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.045300 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-external-api-0"
Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.048809 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/99628143-5f10-4ebc-beea-4e33458d3eb3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0"
Dec 11 08:37:32 crc kubenswrapper[4881]: I1211
08:37:32.048947 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99628143-5f10-4ebc-beea-4e33458d3eb3-logs\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.052395 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-scripts\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.055237 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-config-data\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.069898 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.070114 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbttd\" (UniqueName: \"kubernetes.io/projected/99628143-5f10-4ebc-beea-4e33458d3eb3-kube-api-access-xbttd\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.074745 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.077189 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.079709 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.097303 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.105808 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.145488 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9xcv\" (UniqueName: \"kubernetes.io/projected/52860b37-6c04-42e4-ada9-94c9a46f1773-kube-api-access-m9xcv\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.145810 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-scripts\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.145859 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-config-data\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.145902 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.145965 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/52860b37-6c04-42e4-ada9-94c9a46f1773-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.146064 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.146142 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52860b37-6c04-42e4-ada9-94c9a46f1773-logs\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " 
pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.156835 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.247880 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9xcv\" (UniqueName: \"kubernetes.io/projected/52860b37-6c04-42e4-ada9-94c9a46f1773-kube-api-access-m9xcv\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.248018 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-scripts\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.248049 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-config-data\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.248076 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/52860b37-6c04-42e4-ada9-94c9a46f1773-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.248102 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.248149 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.248194 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52860b37-6c04-42e4-ada9-94c9a46f1773-logs\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.248614 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52860b37-6c04-42e4-ada9-94c9a46f1773-logs\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.248866 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: 
\"52860b37-6c04-42e4-ada9-94c9a46f1773\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.249031 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/52860b37-6c04-42e4-ada9-94c9a46f1773-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.252202 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-scripts\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.253460 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.253485 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-config-data\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.266805 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9xcv\" (UniqueName: \"kubernetes.io/projected/52860b37-6c04-42e4-ada9-94c9a46f1773-kube-api-access-m9xcv\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.286146 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.485212 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 08:37:32 crc kubenswrapper[4881]: I1211 08:37:32.765216 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-s775q" podUID="2793ce53-41a4-4170-998f-b4ddcd0dbcaa" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.162:5353: i/o timeout" Dec 11 08:37:33 crc kubenswrapper[4881]: I1211 08:37:33.933288 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 08:37:34 crc kubenswrapper[4881]: I1211 08:37:34.014588 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 08:37:37 crc kubenswrapper[4881]: I1211 08:37:37.766019 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-s775q" podUID="2793ce53-41a4-4170-998f-b4ddcd0dbcaa" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.162:5353: i/o timeout" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.028024 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.036954 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-s775q" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.203765 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzxn4\" (UniqueName: \"kubernetes.io/projected/4406be5c-0e4c-40ff-ac0f-4da87b36b145-kube-api-access-qzxn4\") pod \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.203909 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-dns-swift-storage-0\") pod \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.204013 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4406be5c-0e4c-40ff-ac0f-4da87b36b145-logs\") pod \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.204080 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-ovsdbserver-sb\") pod \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.204133 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-combined-ca-bundle\") pod \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.204174 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-config\") pod \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " Dec 11 08:37:39 crc kubenswrapper[4881]: 
I1211 08:37:39.204235 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-config-data\") pod \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.204272 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-ovsdbserver-nb\") pod \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.204295 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6269s\" (UniqueName: \"kubernetes.io/projected/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-kube-api-access-6269s\") pod \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.204301 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4406be5c-0e4c-40ff-ac0f-4da87b36b145-logs" (OuterVolumeSpecName: "logs") pod "4406be5c-0e4c-40ff-ac0f-4da87b36b145" (UID: "4406be5c-0e4c-40ff-ac0f-4da87b36b145"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.204320 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-dns-svc\") pod \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\" (UID: \"2793ce53-41a4-4170-998f-b4ddcd0dbcaa\") " Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.204369 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-scripts\") pod \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\" (UID: \"4406be5c-0e4c-40ff-ac0f-4da87b36b145\") " Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.205461 4881 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4406be5c-0e4c-40ff-ac0f-4da87b36b145-logs\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.210689 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4406be5c-0e4c-40ff-ac0f-4da87b36b145-kube-api-access-qzxn4" (OuterVolumeSpecName: "kube-api-access-qzxn4") pod "4406be5c-0e4c-40ff-ac0f-4da87b36b145" (UID: "4406be5c-0e4c-40ff-ac0f-4da87b36b145"). InnerVolumeSpecName "kube-api-access-qzxn4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.210952 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-scripts" (OuterVolumeSpecName: "scripts") pod "4406be5c-0e4c-40ff-ac0f-4da87b36b145" (UID: "4406be5c-0e4c-40ff-ac0f-4da87b36b145"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.226785 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-kube-api-access-6269s" (OuterVolumeSpecName: "kube-api-access-6269s") pod "2793ce53-41a4-4170-998f-b4ddcd0dbcaa" (UID: "2793ce53-41a4-4170-998f-b4ddcd0dbcaa"). InnerVolumeSpecName "kube-api-access-6269s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.240579 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-config-data" (OuterVolumeSpecName: "config-data") pod "4406be5c-0e4c-40ff-ac0f-4da87b36b145" (UID: "4406be5c-0e4c-40ff-ac0f-4da87b36b145"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.257178 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4406be5c-0e4c-40ff-ac0f-4da87b36b145" (UID: "4406be5c-0e4c-40ff-ac0f-4da87b36b145"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.264799 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2793ce53-41a4-4170-998f-b4ddcd0dbcaa" (UID: "2793ce53-41a4-4170-998f-b4ddcd0dbcaa"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.272012 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2793ce53-41a4-4170-998f-b4ddcd0dbcaa" (UID: "2793ce53-41a4-4170-998f-b4ddcd0dbcaa"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.273139 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-config" (OuterVolumeSpecName: "config") pod "2793ce53-41a4-4170-998f-b4ddcd0dbcaa" (UID: "2793ce53-41a4-4170-998f-b4ddcd0dbcaa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.274519 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2793ce53-41a4-4170-998f-b4ddcd0dbcaa" (UID: "2793ce53-41a4-4170-998f-b4ddcd0dbcaa"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.275751 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2793ce53-41a4-4170-998f-b4ddcd0dbcaa" (UID: "2793ce53-41a4-4170-998f-b4ddcd0dbcaa"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.310394 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzxn4\" (UniqueName: \"kubernetes.io/projected/4406be5c-0e4c-40ff-ac0f-4da87b36b145-kube-api-access-qzxn4\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.310467 4881 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.310480 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.310492 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.310979 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.311007 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.311021 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.311035 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6269s\" (UniqueName: \"kubernetes.io/projected/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-kube-api-access-6269s\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.311046 4881 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2793ce53-41a4-4170-998f-b4ddcd0dbcaa-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.311056 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4406be5c-0e4c-40ff-ac0f-4da87b36b145-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.373959 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-cb9l4" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.373981 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-cb9l4" event={"ID":"4406be5c-0e4c-40ff-ac0f-4da87b36b145","Type":"ContainerDied","Data":"f47189dad2695138b96b3e9508efece25d6ff0bed68200235fd23c428b85a229"} Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.374069 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f47189dad2695138b96b3e9508efece25d6ff0bed68200235fd23c428b85a229" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.377327 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-s775q" event={"ID":"2793ce53-41a4-4170-998f-b4ddcd0dbcaa","Type":"ContainerDied","Data":"4652d228ca21149e0e0d79e99f48007085421a032e692e98623ee97e8b0e0978"} Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.377395 4881 scope.go:117] "RemoveContainer" containerID="88e3d3f95b7e7011258611183d8dc80e67bbafc9ef51ce2a0cfde6734d7ee085" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.377506 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-s775q" Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.427367 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-s775q"] Dec 11 08:37:39 crc kubenswrapper[4881]: I1211 08:37:39.437088 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-s775q"] Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.237576 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-55fbb6c694-gw7p4"] Dec 11 08:37:40 crc kubenswrapper[4881]: E1211 08:37:40.238219 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2793ce53-41a4-4170-998f-b4ddcd0dbcaa" containerName="dnsmasq-dns" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.238231 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="2793ce53-41a4-4170-998f-b4ddcd0dbcaa" containerName="dnsmasq-dns" Dec 11 08:37:40 crc kubenswrapper[4881]: E1211 08:37:40.238254 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2793ce53-41a4-4170-998f-b4ddcd0dbcaa" containerName="init" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.238261 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="2793ce53-41a4-4170-998f-b4ddcd0dbcaa" containerName="init" Dec 11 08:37:40 crc kubenswrapper[4881]: E1211 08:37:40.238288 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4406be5c-0e4c-40ff-ac0f-4da87b36b145" containerName="placement-db-sync" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.238294 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="4406be5c-0e4c-40ff-ac0f-4da87b36b145" containerName="placement-db-sync" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.238507 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="2793ce53-41a4-4170-998f-b4ddcd0dbcaa" containerName="dnsmasq-dns" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.238527 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="4406be5c-0e4c-40ff-ac0f-4da87b36b145" containerName="placement-db-sync" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.239605 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.242882 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.243046 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.243177 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-mm49w" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.244197 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.244603 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.252904 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-55fbb6c694-gw7p4"] Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.336310 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/30f691dc-faf6-411b-8cb8-db57047199b0-internal-tls-certs\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.336675 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30f691dc-faf6-411b-8cb8-db57047199b0-combined-ca-bundle\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.336857 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30f691dc-faf6-411b-8cb8-db57047199b0-config-data\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.336975 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/30f691dc-faf6-411b-8cb8-db57047199b0-public-tls-certs\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.337111 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/30f691dc-faf6-411b-8cb8-db57047199b0-logs\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.337250 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgk6c\" (UniqueName: \"kubernetes.io/projected/30f691dc-faf6-411b-8cb8-db57047199b0-kube-api-access-hgk6c\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 
08:37:40.337363 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30f691dc-faf6-411b-8cb8-db57047199b0-scripts\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.438431 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30f691dc-faf6-411b-8cb8-db57047199b0-config-data\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.438511 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/30f691dc-faf6-411b-8cb8-db57047199b0-public-tls-certs\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.438581 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/30f691dc-faf6-411b-8cb8-db57047199b0-logs\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.438628 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgk6c\" (UniqueName: \"kubernetes.io/projected/30f691dc-faf6-411b-8cb8-db57047199b0-kube-api-access-hgk6c\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.438656 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30f691dc-faf6-411b-8cb8-db57047199b0-scripts\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.438694 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/30f691dc-faf6-411b-8cb8-db57047199b0-internal-tls-certs\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.438709 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30f691dc-faf6-411b-8cb8-db57047199b0-combined-ca-bundle\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.440305 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/30f691dc-faf6-411b-8cb8-db57047199b0-logs\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.445087 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/30f691dc-faf6-411b-8cb8-db57047199b0-combined-ca-bundle\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.449498 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/30f691dc-faf6-411b-8cb8-db57047199b0-internal-tls-certs\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.449761 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30f691dc-faf6-411b-8cb8-db57047199b0-config-data\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.450427 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/30f691dc-faf6-411b-8cb8-db57047199b0-public-tls-certs\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.454857 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30f691dc-faf6-411b-8cb8-db57047199b0-scripts\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.459552 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgk6c\" (UniqueName: \"kubernetes.io/projected/30f691dc-faf6-411b-8cb8-db57047199b0-kube-api-access-hgk6c\") pod \"placement-55fbb6c694-gw7p4\" (UID: \"30f691dc-faf6-411b-8cb8-db57047199b0\") " pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:40 crc kubenswrapper[4881]: I1211 08:37:40.557924 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:41 crc kubenswrapper[4881]: I1211 08:37:41.018518 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2793ce53-41a4-4170-998f-b4ddcd0dbcaa" path="/var/lib/kubelet/pods/2793ce53-41a4-4170-998f-b4ddcd0dbcaa/volumes" Dec 11 08:37:42 crc kubenswrapper[4881]: I1211 08:37:42.766886 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-s775q" podUID="2793ce53-41a4-4170-998f-b4ddcd0dbcaa" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.162:5353: i/o timeout" Dec 11 08:37:43 crc kubenswrapper[4881]: E1211 08:37:43.881443 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Dec 11 08:37:43 crc kubenswrapper[4881]: E1211 08:37:43.881669 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dpbqz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-vmjdf_openstack(422faa6a-f2ed-4015-87cd-7878bac246e4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:37:43 crc kubenswrapper[4881]: E1211 08:37:43.882945 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-vmjdf" podUID="422faa6a-f2ed-4015-87cd-7878bac246e4" Dec 11 08:37:44 crc kubenswrapper[4881]: E1211 08:37:44.436714 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" 
pod="openstack/barbican-db-sync-vmjdf" podUID="422faa6a-f2ed-4015-87cd-7878bac246e4" Dec 11 08:37:45 crc kubenswrapper[4881]: I1211 08:37:45.448429 4881 generic.go:334] "Generic (PLEG): container finished" podID="c45356cd-f35d-41fa-98d3-6697e7a8100a" containerID="3845b4845598a018e7d54d3a688631756bdb219853a748ec1617f3155be66256" exitCode=0 Dec 11 08:37:45 crc kubenswrapper[4881]: I1211 08:37:45.448526 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rhqlf" event={"ID":"c45356cd-f35d-41fa-98d3-6697e7a8100a","Type":"ContainerDied","Data":"3845b4845598a018e7d54d3a688631756bdb219853a748ec1617f3155be66256"} Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.535194 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-p9qkf" event={"ID":"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce","Type":"ContainerDied","Data":"07c511b037163dac794c71d4ee8742054bc8a9376deda13aedf60e35cc46efdb"} Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.535727 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="07c511b037163dac794c71d4ee8742054bc8a9376deda13aedf60e35cc46efdb" Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.539763 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-p9qkf" Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.636504 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-scripts\") pod \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.636707 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-config-data\") pod \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.636786 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-combined-ca-bundle\") pod \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.636839 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6r7ld\" (UniqueName: \"kubernetes.io/projected/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-kube-api-access-6r7ld\") pod \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.636913 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-fernet-keys\") pod \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.636974 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-credential-keys\") pod \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\" (UID: \"5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce\") " Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.644117 4881 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce" (UID: "5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.645228 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-scripts" (OuterVolumeSpecName: "scripts") pod "5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce" (UID: "5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.653558 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-kube-api-access-6r7ld" (OuterVolumeSpecName: "kube-api-access-6r7ld") pod "5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce" (UID: "5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce"). InnerVolumeSpecName "kube-api-access-6r7ld". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.659498 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce" (UID: "5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.667602 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-config-data" (OuterVolumeSpecName: "config-data") pod "5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce" (UID: "5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.678896 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce" (UID: "5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.739185 4881 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-credential-keys\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.739217 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-scripts\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.739227 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-config-data\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.739235 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.739245 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6r7ld\" (UniqueName: \"kubernetes.io/projected/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-kube-api-access-6r7ld\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:51 crc kubenswrapper[4881]: I1211 08:37:51.739255 4881 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce-fernet-keys\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.546096 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-p9qkf"
Dec 11 08:37:52 crc kubenswrapper[4881]: E1211 08:37:52.561241 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified"
Dec 11 08:37:52 crc kubenswrapper[4881]: E1211 08:37:52.561482 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-c7s5v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-bcjrt_openstack(cc1ec075-9e84-4cfd-9f5a-b29d5af0d610): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 11 08:37:52 crc kubenswrapper[4881]: E1211 08:37:52.562753 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-bcjrt" podUID="cc1ec075-9e84-4cfd-9f5a-b29d5af0d610"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.659394 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-7fbbb6db6c-bqwjn"]
Dec 11 08:37:52 crc kubenswrapper[4881]: E1211 08:37:52.660229 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce" containerName="keystone-bootstrap"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.660249 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce" containerName="keystone-bootstrap"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.660548 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce" containerName="keystone-bootstrap"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.661571 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.664569 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.664971 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.665162 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-9vkph"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.665411 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.665700 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.665818 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.671920 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7fbbb6db6c-bqwjn"]
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.766501 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrmgd\" (UniqueName: \"kubernetes.io/projected/cae51d9b-e997-4228-af25-872a6e16df8d-kube-api-access-zrmgd\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.766559 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-public-tls-certs\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.766591 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-fernet-keys\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.766761 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-internal-tls-certs\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.766939 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-config-data\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.767489 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-scripts\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.767598 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-combined-ca-bundle\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.767780 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-credential-keys\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.870581 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-credential-keys\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.870721 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrmgd\" (UniqueName: \"kubernetes.io/projected/cae51d9b-e997-4228-af25-872a6e16df8d-kube-api-access-zrmgd\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.870780 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-public-tls-certs\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.870833 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-fernet-keys\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.870927 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-internal-tls-certs\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.870998 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-config-data\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.871061 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-scripts\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.871135 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-combined-ca-bundle\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.881729 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-fernet-keys\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.881899 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-credential-keys\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.882256 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-config-data\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.882691 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-scripts\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.882766 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-public-tls-certs\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.885154 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-internal-tls-certs\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.886815 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cae51d9b-e997-4228-af25-872a6e16df8d-combined-ca-bundle\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.897092 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrmgd\" (UniqueName: \"kubernetes.io/projected/cae51d9b-e997-4228-af25-872a6e16df8d-kube-api-access-zrmgd\") pod \"keystone-7fbbb6db6c-bqwjn\" (UID: \"cae51d9b-e997-4228-af25-872a6e16df8d\") " pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:52 crc kubenswrapper[4881]: I1211 08:37:52.999597 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-9vkph"
Dec 11 08:37:53 crc kubenswrapper[4881]: I1211 08:37:53.008806 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:53 crc kubenswrapper[4881]: E1211 08:37:53.033184 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified"
Dec 11 08:37:53 crc kubenswrapper[4881]: E1211 08:37:53.033406 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd4h85h77h5f6h564h58h75hcfh645hdbh674hbdh676h5f8h584h6chd9h658h9h64dh64dh77h6bh59bh5dfh5fdh64chf9hcfh557h5b5h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nsqms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(0074c6f2-5d03-406d-a8a3-19f87e5980d8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 11 08:37:53 crc kubenswrapper[4881]: I1211 08:37:53.138806 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rhqlf"
Dec 11 08:37:53 crc kubenswrapper[4881]: I1211 08:37:53.179487 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-24zj7\" (UniqueName: \"kubernetes.io/projected/c45356cd-f35d-41fa-98d3-6697e7a8100a-kube-api-access-24zj7\") pod \"c45356cd-f35d-41fa-98d3-6697e7a8100a\" (UID: \"c45356cd-f35d-41fa-98d3-6697e7a8100a\") "
Dec 11 08:37:53 crc kubenswrapper[4881]: I1211 08:37:53.179663 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c45356cd-f35d-41fa-98d3-6697e7a8100a-combined-ca-bundle\") pod \"c45356cd-f35d-41fa-98d3-6697e7a8100a\" (UID: \"c45356cd-f35d-41fa-98d3-6697e7a8100a\") "
Dec 11 08:37:53 crc kubenswrapper[4881]: I1211 08:37:53.179773 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c45356cd-f35d-41fa-98d3-6697e7a8100a-config\") pod \"c45356cd-f35d-41fa-98d3-6697e7a8100a\" (UID: \"c45356cd-f35d-41fa-98d3-6697e7a8100a\") "
Dec 11 08:37:53 crc kubenswrapper[4881]: I1211 08:37:53.194656 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c45356cd-f35d-41fa-98d3-6697e7a8100a-kube-api-access-24zj7" (OuterVolumeSpecName: "kube-api-access-24zj7") pod "c45356cd-f35d-41fa-98d3-6697e7a8100a" (UID: "c45356cd-f35d-41fa-98d3-6697e7a8100a"). InnerVolumeSpecName "kube-api-access-24zj7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:37:53 crc kubenswrapper[4881]: I1211 08:37:53.211320 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c45356cd-f35d-41fa-98d3-6697e7a8100a-config" (OuterVolumeSpecName: "config") pod "c45356cd-f35d-41fa-98d3-6697e7a8100a" (UID: "c45356cd-f35d-41fa-98d3-6697e7a8100a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:37:53 crc kubenswrapper[4881]: I1211 08:37:53.211854 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c45356cd-f35d-41fa-98d3-6697e7a8100a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c45356cd-f35d-41fa-98d3-6697e7a8100a" (UID: "c45356cd-f35d-41fa-98d3-6697e7a8100a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:37:53 crc kubenswrapper[4881]: I1211 08:37:53.281881 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c45356cd-f35d-41fa-98d3-6697e7a8100a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:53 crc kubenswrapper[4881]: I1211 08:37:53.281926 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/c45356cd-f35d-41fa-98d3-6697e7a8100a-config\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:53 crc kubenswrapper[4881]: I1211 08:37:53.281936 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-24zj7\" (UniqueName: \"kubernetes.io/projected/c45356cd-f35d-41fa-98d3-6697e7a8100a-kube-api-access-24zj7\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:53 crc kubenswrapper[4881]: E1211 08:37:53.432514 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified"
Dec 11 08:37:53 crc kubenswrapper[4881]: E1211 08:37:53.432766 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qd2dc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-jvkz7_openstack(2dde239c-3502-4b29-8f5d-1893f53819bd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 11 08:37:53 crc kubenswrapper[4881]: E1211 08:37:53.434154 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-jvkz7" podUID="2dde239c-3502-4b29-8f5d-1893f53819bd"
Dec 11 08:37:53 crc kubenswrapper[4881]: I1211 08:37:53.476494 4881 scope.go:117] "RemoveContainer" containerID="4bc0e81d968f9e5cffc363ba7585560c6a7e636d710910c73c29abe205a2c605"
Dec 11 08:37:53 crc kubenswrapper[4881]: I1211 08:37:53.568879 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rhqlf" event={"ID":"c45356cd-f35d-41fa-98d3-6697e7a8100a","Type":"ContainerDied","Data":"e596a8ff36255df8134f4dd68ca41fb09a0db49062f3d114624d88a1323570d7"}
Dec 11 08:37:53 crc kubenswrapper[4881]: I1211 08:37:53.568931 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rhqlf"
Dec 11 08:37:53 crc kubenswrapper[4881]: I1211 08:37:53.568967 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e596a8ff36255df8134f4dd68ca41fb09a0db49062f3d114624d88a1323570d7"
Dec 11 08:37:53 crc kubenswrapper[4881]: E1211 08:37:53.589189 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-bcjrt" podUID="cc1ec075-9e84-4cfd-9f5a-b29d5af0d610"
Dec 11 08:37:53 crc kubenswrapper[4881]: E1211 08:37:53.589666 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified\\\"\"" pod="openstack/heat-db-sync-jvkz7" podUID="2dde239c-3502-4b29-8f5d-1893f53819bd"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.334700 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-lwp6f"]
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.396162 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-4kcvc"]
Dec 11 08:37:54 crc kubenswrapper[4881]: E1211 08:37:54.396594 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c45356cd-f35d-41fa-98d3-6697e7a8100a" containerName="neutron-db-sync"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.396607 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="c45356cd-f35d-41fa-98d3-6697e7a8100a" containerName="neutron-db-sync"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.396824 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="c45356cd-f35d-41fa-98d3-6697e7a8100a" containerName="neutron-db-sync"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.415460 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.420805 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-lwp6f"]
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.460413 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-4kcvc"]
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.472838 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-55fbb6c694-gw7p4"]
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.515150 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjnjs\" (UniqueName: \"kubernetes.io/projected/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-kube-api-access-pjnjs\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.515227 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-ovsdbserver-sb\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.515355 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-dns-svc\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.515435 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-ovsdbserver-nb\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.515503 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-config\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.515543 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-dns-swift-storage-0\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.521916 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5ccbc79968-kjfhq"]
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.523955 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5ccbc79968-kjfhq"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.527600 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.528398 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.528519 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.528752 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-xndx6"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.620910 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-combined-ca-bundle\") pod \"neutron-5ccbc79968-kjfhq\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " pod="openstack/neutron-5ccbc79968-kjfhq"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.620965 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-config\") pod \"neutron-5ccbc79968-kjfhq\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " pod="openstack/neutron-5ccbc79968-kjfhq"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.621038 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-dns-svc\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.621063 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-ovsdbserver-nb\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.621087 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-config\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.621114 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-dns-swift-storage-0\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.621190 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjnjs\" (UniqueName: \"kubernetes.io/projected/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-kube-api-access-pjnjs\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.621219 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-ovsdbserver-sb\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.621245 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nht9t\" (UniqueName: \"kubernetes.io/projected/9cac4a82-5425-4f3a-bf86-26d9099432e3-kube-api-access-nht9t\") pod \"neutron-5ccbc79968-kjfhq\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " pod="openstack/neutron-5ccbc79968-kjfhq"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.621266 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-httpd-config\") pod \"neutron-5ccbc79968-kjfhq\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " pod="openstack/neutron-5ccbc79968-kjfhq"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.621290 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-ovndb-tls-certs\") pod \"neutron-5ccbc79968-kjfhq\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " pod="openstack/neutron-5ccbc79968-kjfhq"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.622165 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-dns-svc\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.622683 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-ovsdbserver-nb\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.623137 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-config\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.623755 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-dns-swift-storage-0\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.624546 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-ovsdbserver-sb\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.630676 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5ccbc79968-kjfhq"]
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.658796 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7fbbb6db6c-bqwjn"]
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.676101 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.676709 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjnjs\" (UniqueName: \"kubernetes.io/projected/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-kube-api-access-pjnjs\") pod \"dnsmasq-dns-84b966f6c9-4kcvc\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.686819 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"3dca57af-8220-449b-a5fc-8001bcc8c180","Type":"ContainerStarted","Data":"040473c11cc342fb095cdc5e6d4af3dd423c70da54ff938477dab4b9b33715a3"}
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.723955 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nht9t\" (UniqueName: \"kubernetes.io/projected/9cac4a82-5425-4f3a-bf86-26d9099432e3-kube-api-access-nht9t\") pod \"neutron-5ccbc79968-kjfhq\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " pod="openstack/neutron-5ccbc79968-kjfhq"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.724020 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-httpd-config\") pod \"neutron-5ccbc79968-kjfhq\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " pod="openstack/neutron-5ccbc79968-kjfhq"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.724060 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-ovndb-tls-certs\") pod \"neutron-5ccbc79968-kjfhq\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " pod="openstack/neutron-5ccbc79968-kjfhq"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.724095 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-combined-ca-bundle\") pod \"neutron-5ccbc79968-kjfhq\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " pod="openstack/neutron-5ccbc79968-kjfhq"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.728599 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-config\") pod \"neutron-5ccbc79968-kjfhq\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " pod="openstack/neutron-5ccbc79968-kjfhq"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.742011 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-httpd-config\") pod \"neutron-5ccbc79968-kjfhq\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " pod="openstack/neutron-5ccbc79968-kjfhq"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.753619 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-combined-ca-bundle\") pod \"neutron-5ccbc79968-kjfhq\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " pod="openstack/neutron-5ccbc79968-kjfhq"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.753916 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-ovndb-tls-certs\") pod \"neutron-5ccbc79968-kjfhq\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " pod="openstack/neutron-5ccbc79968-kjfhq"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.753916 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b515a685-da3e-4d92-a8e5-60561e9de83f","Type":"ContainerStarted","Data":"b0f68e2f3dbdd0949e067a20a560be357205ca8c922ce464cbaa94fae9132126"}
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.753997 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-config\") pod \"neutron-5ccbc79968-kjfhq\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " pod="openstack/neutron-5ccbc79968-kjfhq"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.757550 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.774087 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=3.921619614 podStartE2EDuration="36.774067375s" podCreationTimestamp="2025-12-11 08:37:18 +0000 UTC" firstStartedPulling="2025-12-11 08:37:20.178417162 +0000 UTC m=+1288.555785849" lastFinishedPulling="2025-12-11 08:37:53.030864893 +0000 UTC m=+1321.408233610" observedRunningTime="2025-12-11 08:37:54.717927575 +0000 UTC m=+1323.095296272" watchObservedRunningTime="2025-12-11 08:37:54.774067375 +0000 UTC m=+1323.151436082"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.783781 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nht9t\" (UniqueName: \"kubernetes.io/projected/9cac4a82-5425-4f3a-bf86-26d9099432e3-kube-api-access-nht9t\") pod \"neutron-5ccbc79968-kjfhq\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " pod="openstack/neutron-5ccbc79968-kjfhq"
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.813826 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Dec 11 08:37:54 crc kubenswrapper[4881]: I1211 08:37:54.911024 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5ccbc79968-kjfhq"
Dec 11 08:37:55 crc kubenswrapper[4881]: W1211 08:37:55.148813 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod30f691dc_faf6_411b_8cb8_db57047199b0.slice/crio-1918937effe8cb309e1d9f21e75a96fe6c5c8a6d00b4755c3d1808334e01b609 WatchSource:0}: Error finding container 1918937effe8cb309e1d9f21e75a96fe6c5c8a6d00b4755c3d1808334e01b609: Status 404 returned error can't find the container with id 1918937effe8cb309e1d9f21e75a96fe6c5c8a6d00b4755c3d1808334e01b609
Dec 11 08:37:55 crc kubenswrapper[4881]: W1211 08:37:55.151301 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod99628143_5f10_4ebc_beea_4e33458d3eb3.slice/crio-ea8e0d66da7a3d1e62ee204d23734280a51a82f6d86d6e7ed79f893b2b14ee73 WatchSource:0}: Error finding container ea8e0d66da7a3d1e62ee204d23734280a51a82f6d86d6e7ed79f893b2b14ee73: Status 404 returned error can't find the container with id ea8e0d66da7a3d1e62ee204d23734280a51a82f6d86d6e7ed79f893b2b14ee73
Dec 11 08:37:55 crc kubenswrapper[4881]: I1211 08:37:55.813518 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7fbbb6db6c-bqwjn" event={"ID":"cae51d9b-e997-4228-af25-872a6e16df8d","Type":"ContainerStarted","Data":"d2688a99152c27e62cca68dd033c9a63e9dbda15f13b48b1f466218d3be25de9"}
Dec 11 08:37:55 crc kubenswrapper[4881]: I1211 08:37:55.815437 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"99628143-5f10-4ebc-beea-4e33458d3eb3","Type":"ContainerStarted","Data":"ea8e0d66da7a3d1e62ee204d23734280a51a82f6d86d6e7ed79f893b2b14ee73"}
Dec 11 08:37:55 crc kubenswrapper[4881]: I1211 08:37:55.815947 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-4kcvc"]
Dec 11 08:37:55 crc kubenswrapper[4881]: I1211 08:37:55.819116 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-55fbb6c694-gw7p4" event={"ID":"30f691dc-faf6-411b-8cb8-db57047199b0","Type":"ContainerStarted","Data":"1918937effe8cb309e1d9f21e75a96fe6c5c8a6d00b4755c3d1808334e01b609"}
Dec 11 08:37:55 crc kubenswrapper[4881]: I1211 08:37:55.822276 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f" event={"ID":"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9","Type":"ContainerStarted","Data":"b30ff406f73cf1864f1b0f1aa9750f17f35da33b9460d6685309a4a7014c0de1"}
Dec 11 08:37:55 crc kubenswrapper[4881]: I1211 08:37:55.826092 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"52860b37-6c04-42e4-ada9-94c9a46f1773","Type":"ContainerStarted","Data":"8831cbe9ab9ccd27f2f61e0baec07684212052c2808d3a1b4116518cc5678c57"}
Dec 11 08:37:56 crc kubenswrapper[4881]: I1211 08:37:56.037078 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5ccbc79968-kjfhq"]
Dec 11 08:37:56 crc kubenswrapper[4881]: W1211 08:37:56.039400 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9cac4a82_5425_4f3a_bf86_26d9099432e3.slice/crio-8fc0b155ad9f9c867c71b4cf517c1afdb007ba51f3af9c990591a4b1a157b5f7 WatchSource:0}: Error finding container 8fc0b155ad9f9c867c71b4cf517c1afdb007ba51f3af9c990591a4b1a157b5f7: Status 404 returned error can't find the container with id 8fc0b155ad9f9c867c71b4cf517c1afdb007ba51f3af9c990591a4b1a157b5f7
Dec 11 08:37:56 crc kubenswrapper[4881]: I1211 08:37:56.841566 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc" event={"ID":"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec","Type":"ContainerStarted","Data":"69d0bb914bdc4ee02bb01c8f476bc1747e6b62a4a66cc42e8d4da98133bd85dd"}
Dec 11 08:37:56 crc kubenswrapper[4881]: I1211 08:37:56.844440 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"99628143-5f10-4ebc-beea-4e33458d3eb3","Type":"ContainerStarted","Data":"e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef"}
Dec 11 08:37:56 crc kubenswrapper[4881]: I1211 08:37:56.848696 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-55fbb6c694-gw7p4" event={"ID":"30f691dc-faf6-411b-8cb8-db57047199b0","Type":"ContainerStarted","Data":"c4f5e33aa0f165693630e4004a83520f902a28c9270bdc1faafe4981a69fdd5e"}
Dec 11 08:37:56 crc kubenswrapper[4881]: I1211 08:37:56.865250 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ccbc79968-kjfhq" event={"ID":"9cac4a82-5425-4f3a-bf86-26d9099432e3","Type":"ContainerStarted","Data":"8fc0b155ad9f9c867c71b4cf517c1afdb007ba51f3af9c990591a4b1a157b5f7"}
Dec 11 08:37:56 crc kubenswrapper[4881]: I1211 08:37:56.867571 4881 generic.go:334] "Generic (PLEG): container finished" podID="0fe972c7-ac62-43a9-87b0-17fb27c4c2c9" containerID="a66ec0b58c9521267d3a71d325213bd9f73d155aeb70dad119e448efb03b74b8" exitCode=0
Dec 11 08:37:56 crc kubenswrapper[4881]: I1211 08:37:56.867638 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f" event={"ID":"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9","Type":"ContainerDied","Data":"a66ec0b58c9521267d3a71d325213bd9f73d155aeb70dad119e448efb03b74b8"}
Dec 11 08:37:56 crc kubenswrapper[4881]: I1211 08:37:56.870374 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7fbbb6db6c-bqwjn" event={"ID":"cae51d9b-e997-4228-af25-872a6e16df8d","Type":"ContainerStarted","Data":"60992bdc992053b00621d4ea9cf1d64cff2ae04f5e1ca7c1d197c14d70e8b1a0"}
Dec 11 08:37:56 crc kubenswrapper[4881]: I1211 08:37:56.870594 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-7fbbb6db6c-bqwjn"
Dec 11 08:37:56 crc kubenswrapper[4881]: I1211 08:37:56.919793 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-7fbbb6db6c-bqwjn" podStartSLOduration=4.919772221 podStartE2EDuration="4.919772221s" podCreationTimestamp="2025-12-11 08:37:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:37:56.906443844 +0000 UTC m=+1325.283812551" watchObservedRunningTime="2025-12-11 08:37:56.919772221 +0000 UTC m=+1325.297140918"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.297534 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6986b4b8b9-dlx84"]
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.301083 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.305160 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.305210 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.316052 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-combined-ca-bundle\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.316096 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-public-tls-certs\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.316123 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nr59d\" (UniqueName: \"kubernetes.io/projected/8edd456d-09d4-46fc-97ef-68c44cb5320c-kube-api-access-nr59d\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.316157 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-httpd-config\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.316193 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-internal-tls-certs\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.316290 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-ovndb-tls-certs\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.316362 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-config\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.317735 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6986b4b8b9-dlx84"]
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.421516 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-config\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.421741 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-combined-ca-bundle\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.421769 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-public-tls-certs\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.421794 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nr59d\" (UniqueName: \"kubernetes.io/projected/8edd456d-09d4-46fc-97ef-68c44cb5320c-kube-api-access-nr59d\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.421841 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-httpd-config\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.421907 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-internal-tls-certs\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.422138 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-ovndb-tls-certs\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.428840 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-public-tls-certs\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.429441 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-httpd-config\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.430970 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-ovndb-tls-certs\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.432051 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-combined-ca-bundle\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.432884 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-config\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.433099 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8edd456d-09d4-46fc-97ef-68c44cb5320c-internal-tls-certs\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.451133 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nr59d\" (UniqueName: \"kubernetes.io/projected/8edd456d-09d4-46fc-97ef-68c44cb5320c-kube-api-access-nr59d\") pod \"neutron-6986b4b8b9-dlx84\" (UID: \"8edd456d-09d4-46fc-97ef-68c44cb5320c\") " pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.511753 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6986b4b8b9-dlx84"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.519444 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.627450 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-config\") pod \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") "
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.628633 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-ovsdbserver-sb\") pod \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") "
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.628853 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-dns-svc\") pod \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") "
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.629064 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25fhz\" (UniqueName: \"kubernetes.io/projected/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-kube-api-access-25fhz\") pod \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") "
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.629247 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-dns-swift-storage-0\") pod \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") "
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.629481 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-ovsdbserver-nb\") pod \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\" (UID: \"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9\") "
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.638597 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-kube-api-access-25fhz" (OuterVolumeSpecName: "kube-api-access-25fhz") pod "0fe972c7-ac62-43a9-87b0-17fb27c4c2c9" (UID: "0fe972c7-ac62-43a9-87b0-17fb27c4c2c9"). InnerVolumeSpecName "kube-api-access-25fhz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.645385 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25fhz\" (UniqueName: \"kubernetes.io/projected/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-kube-api-access-25fhz\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.660549 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0fe972c7-ac62-43a9-87b0-17fb27c4c2c9" (UID: "0fe972c7-ac62-43a9-87b0-17fb27c4c2c9"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.675097 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-config" (OuterVolumeSpecName: "config") pod "0fe972c7-ac62-43a9-87b0-17fb27c4c2c9" (UID: "0fe972c7-ac62-43a9-87b0-17fb27c4c2c9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.681288 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0fe972c7-ac62-43a9-87b0-17fb27c4c2c9" (UID: "0fe972c7-ac62-43a9-87b0-17fb27c4c2c9"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.691964 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0fe972c7-ac62-43a9-87b0-17fb27c4c2c9" (UID: "0fe972c7-ac62-43a9-87b0-17fb27c4c2c9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.707562 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0fe972c7-ac62-43a9-87b0-17fb27c4c2c9" (UID: "0fe972c7-ac62-43a9-87b0-17fb27c4c2c9"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.749046 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.749075 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-config\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.749086 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.749096 4881 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.749106 4881 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.889756 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b515a685-da3e-4d92-a8e5-60561e9de83f","Type":"ContainerStarted","Data":"6e030bb3142081f13d8ead715f85af104c1f3f77223d6293f3a7fd92af808dcf"}
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.889807 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b515a685-da3e-4d92-a8e5-60561e9de83f","Type":"ContainerStarted","Data":"85615273db8770d0bfd4f2f0f6fc27f2cf35a4e2a13ef157251724772c7b59fb"}
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.896362 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-vmjdf" event={"ID":"422faa6a-f2ed-4015-87cd-7878bac246e4","Type":"ContainerStarted","Data":"0bafc15cd1a4a159ece5b6186664af0b35f78a1f3442fe0b6442a32e1917de5f"}
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.905289 4881 generic.go:334] "Generic (PLEG): container finished" podID="d56a4394-101c-4fa1-ba3c-b9eb0907a5ec" containerID="66166530f41eca8f38ad0d100510c7d6652a0e107bdab21546d7e959198b045d" exitCode=0
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.905440 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc" event={"ID":"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec","Type":"ContainerDied","Data":"66166530f41eca8f38ad0d100510c7d6652a0e107bdab21546d7e959198b045d"}
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.926241 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=46.926225457 podStartE2EDuration="46.926225457s" podCreationTimestamp="2025-12-11 08:37:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:37:57.920194891 +0000 UTC m=+1326.297563588" watchObservedRunningTime="2025-12-11 08:37:57.926225457 +0000 UTC m=+1326.303594154"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.942471 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"99628143-5f10-4ebc-beea-4e33458d3eb3","Type":"ContainerStarted","Data":"fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4"}
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.942719 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="99628143-5f10-4ebc-beea-4e33458d3eb3" containerName="glance-log" containerID="cri-o://e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef" gracePeriod=30
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.943259 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="99628143-5f10-4ebc-beea-4e33458d3eb3" containerName="glance-httpd" containerID="cri-o://fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4" gracePeriod=30
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.981148 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-vmjdf" podStartSLOduration=2.635112656 podStartE2EDuration="39.981125095s" podCreationTimestamp="2025-12-11 08:37:18 +0000 UTC" firstStartedPulling="2025-12-11 08:37:19.284697926 +0000 UTC m=+1287.662066623" lastFinishedPulling="2025-12-11 08:37:56.630710365 +0000 UTC m=+1325.008079062" observedRunningTime="2025-12-11 08:37:57.946286659 +0000 UTC m=+1326.323655356" watchObservedRunningTime="2025-12-11 08:37:57.981125095 +0000 UTC m=+1326.358493792"
Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.996759 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ccbc79968-kjfhq"
event={"ID":"9cac4a82-5425-4f3a-bf86-26d9099432e3","Type":"ContainerStarted","Data":"b4787b60ea2d1af27f7a25e12d4c65e4fa7e8df398d8335843a14437ef100214"} Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.996805 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ccbc79968-kjfhq" event={"ID":"9cac4a82-5425-4f3a-bf86-26d9099432e3","Type":"ContainerStarted","Data":"989f77dfba60f8b047748455ff61be09fdb5fecc1425754e7501917b9b888581"} Dec 11 08:37:57 crc kubenswrapper[4881]: I1211 08:37:57.997700 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5ccbc79968-kjfhq" Dec 11 08:37:58 crc kubenswrapper[4881]: I1211 08:37:58.027491 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f" event={"ID":"0fe972c7-ac62-43a9-87b0-17fb27c4c2c9","Type":"ContainerDied","Data":"b30ff406f73cf1864f1b0f1aa9750f17f35da33b9460d6685309a4a7014c0de1"} Dec 11 08:37:58 crc kubenswrapper[4881]: I1211 08:37:58.027562 4881 scope.go:117] "RemoveContainer" containerID="a66ec0b58c9521267d3a71d325213bd9f73d155aeb70dad119e448efb03b74b8" Dec 11 08:37:58 crc kubenswrapper[4881]: I1211 08:37:58.027984 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-lwp6f" Dec 11 08:37:58 crc kubenswrapper[4881]: I1211 08:37:58.050012 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"52860b37-6c04-42e4-ada9-94c9a46f1773","Type":"ContainerStarted","Data":"bc26391021fb324f31517bbc60d528d2c90eb437b191ea229c4394e736cb60ba"} Dec 11 08:37:58 crc kubenswrapper[4881]: I1211 08:37:58.083440 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=28.083416415 podStartE2EDuration="28.083416415s" podCreationTimestamp="2025-12-11 08:37:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:37:58.002201233 +0000 UTC m=+1326.379569930" watchObservedRunningTime="2025-12-11 08:37:58.083416415 +0000 UTC m=+1326.460785112" Dec 11 08:37:58 crc kubenswrapper[4881]: I1211 08:37:58.097038 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-55fbb6c694-gw7p4" event={"ID":"30f691dc-faf6-411b-8cb8-db57047199b0","Type":"ContainerStarted","Data":"72e01cc42177ab2334f2cce8b492946cc96e03b02198bd815c68b3daca6a3e1b"} Dec 11 08:37:58 crc kubenswrapper[4881]: I1211 08:37:58.097478 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:58 crc kubenswrapper[4881]: I1211 08:37:58.097520 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:37:58 crc kubenswrapper[4881]: I1211 08:37:58.098257 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5ccbc79968-kjfhq" podStartSLOduration=4.098236969 podStartE2EDuration="4.098236969s" podCreationTimestamp="2025-12-11 08:37:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:37:58.051072494 +0000 UTC m=+1326.428441191" watchObservedRunningTime="2025-12-11 08:37:58.098236969 +0000 UTC m=+1326.475605666" Dec 11 08:37:58 crc kubenswrapper[4881]: I1211 08:37:58.104692 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"0074c6f2-5d03-406d-a8a3-19f87e5980d8","Type":"ContainerStarted","Data":"cfbaa890eb39174cd60e497974aeb6e480aa79256fdd9afb6abd5dfc43862e10"} Dec 11 08:37:58 crc kubenswrapper[4881]: I1211 08:37:58.146006 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-lwp6f"] Dec 11 08:37:58 crc kubenswrapper[4881]: I1211 08:37:58.153970 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-lwp6f"] Dec 11 08:37:58 crc kubenswrapper[4881]: I1211 08:37:58.165784 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-55fbb6c694-gw7p4" podStartSLOduration=18.165765046 podStartE2EDuration="18.165765046s" podCreationTimestamp="2025-12-11 08:37:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:37:58.124300607 +0000 UTC m=+1326.501669304" watchObservedRunningTime="2025-12-11 08:37:58.165765046 +0000 UTC m=+1326.543133753" Dec 11 08:37:58 crc kubenswrapper[4881]: I1211 08:37:58.190160 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6986b4b8b9-dlx84"] Dec 11 08:37:58 crc kubenswrapper[4881]: I1211 08:37:58.972450 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.035971 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0fe972c7-ac62-43a9-87b0-17fb27c4c2c9" path="/var/lib/kubelet/pods/0fe972c7-ac62-43a9-87b0-17fb27c4c2c9/volumes" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.111376 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99628143-5f10-4ebc-beea-4e33458d3eb3-logs\") pod \"99628143-5f10-4ebc-beea-4e33458d3eb3\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.111434 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/99628143-5f10-4ebc-beea-4e33458d3eb3-httpd-run\") pod \"99628143-5f10-4ebc-beea-4e33458d3eb3\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.111495 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-scripts\") pod \"99628143-5f10-4ebc-beea-4e33458d3eb3\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.112077 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99628143-5f10-4ebc-beea-4e33458d3eb3-logs" (OuterVolumeSpecName: "logs") pod "99628143-5f10-4ebc-beea-4e33458d3eb3" (UID: "99628143-5f10-4ebc-beea-4e33458d3eb3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.112391 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99628143-5f10-4ebc-beea-4e33458d3eb3-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "99628143-5f10-4ebc-beea-4e33458d3eb3" (UID: "99628143-5f10-4ebc-beea-4e33458d3eb3"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.114003 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-combined-ca-bundle\") pod \"99628143-5f10-4ebc-beea-4e33458d3eb3\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.114104 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"99628143-5f10-4ebc-beea-4e33458d3eb3\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.114167 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbttd\" (UniqueName: \"kubernetes.io/projected/99628143-5f10-4ebc-beea-4e33458d3eb3-kube-api-access-xbttd\") pod \"99628143-5f10-4ebc-beea-4e33458d3eb3\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.114197 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-config-data\") pod \"99628143-5f10-4ebc-beea-4e33458d3eb3\" (UID: \"99628143-5f10-4ebc-beea-4e33458d3eb3\") " Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.115189 4881 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99628143-5f10-4ebc-beea-4e33458d3eb3-logs\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.115212 4881 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/99628143-5f10-4ebc-beea-4e33458d3eb3-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.120193 4881 generic.go:334] "Generic (PLEG): container finished" podID="99628143-5f10-4ebc-beea-4e33458d3eb3" containerID="fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4" exitCode=0 Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.120248 4881 generic.go:334] "Generic (PLEG): container finished" podID="99628143-5f10-4ebc-beea-4e33458d3eb3" containerID="e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef" exitCode=143 Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.120310 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.120325 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"99628143-5f10-4ebc-beea-4e33458d3eb3","Type":"ContainerDied","Data":"fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4"} Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.120408 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"99628143-5f10-4ebc-beea-4e33458d3eb3","Type":"ContainerDied","Data":"e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef"} Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.120422 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"99628143-5f10-4ebc-beea-4e33458d3eb3","Type":"ContainerDied","Data":"ea8e0d66da7a3d1e62ee204d23734280a51a82f6d86d6e7ed79f893b2b14ee73"} Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.120450 4881 scope.go:117] "RemoveContainer" containerID="fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.120460 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "99628143-5f10-4ebc-beea-4e33458d3eb3" (UID: "99628143-5f10-4ebc-beea-4e33458d3eb3"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.121484 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99628143-5f10-4ebc-beea-4e33458d3eb3-kube-api-access-xbttd" (OuterVolumeSpecName: "kube-api-access-xbttd") pod "99628143-5f10-4ebc-beea-4e33458d3eb3" (UID: "99628143-5f10-4ebc-beea-4e33458d3eb3"). InnerVolumeSpecName "kube-api-access-xbttd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.122294 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-scripts" (OuterVolumeSpecName: "scripts") pod "99628143-5f10-4ebc-beea-4e33458d3eb3" (UID: "99628143-5f10-4ebc-beea-4e33458d3eb3"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.122459 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6986b4b8b9-dlx84" event={"ID":"8edd456d-09d4-46fc-97ef-68c44cb5320c","Type":"ContainerStarted","Data":"f152c32c230102afe7db1c0ec6896cfac4e2e0620f8e634d106cb5f1da6d900c"} Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.142853 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="52860b37-6c04-42e4-ada9-94c9a46f1773" containerName="glance-log" containerID="cri-o://bc26391021fb324f31517bbc60d528d2c90eb437b191ea229c4394e736cb60ba" gracePeriod=30 Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.142941 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="52860b37-6c04-42e4-ada9-94c9a46f1773" containerName="glance-httpd" containerID="cri-o://1caf79a505830b54c1a9c58bcd9a0322448f88ad809e3ede5d5aefad2293f254" gracePeriod=30 Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.144012 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"52860b37-6c04-42e4-ada9-94c9a46f1773","Type":"ContainerStarted","Data":"1caf79a505830b54c1a9c58bcd9a0322448f88ad809e3ede5d5aefad2293f254"} Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.170102 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "99628143-5f10-4ebc-beea-4e33458d3eb3" (UID: "99628143-5f10-4ebc-beea-4e33458d3eb3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.176029 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=28.176010951 podStartE2EDuration="28.176010951s" podCreationTimestamp="2025-12-11 08:37:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:37:59.169501021 +0000 UTC m=+1327.546869718" watchObservedRunningTime="2025-12-11 08:37:59.176010951 +0000 UTC m=+1327.553379648" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.197228 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-config-data" (OuterVolumeSpecName: "config-data") pod "99628143-5f10-4ebc-beea-4e33458d3eb3" (UID: "99628143-5f10-4ebc-beea-4e33458d3eb3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.219375 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xbttd\" (UniqueName: \"kubernetes.io/projected/99628143-5f10-4ebc-beea-4e33458d3eb3-kube-api-access-xbttd\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.219413 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.219426 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.219436 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99628143-5f10-4ebc-beea-4e33458d3eb3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.219462 4881 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.256307 4881 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.321219 4881 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.333901 4881 scope.go:117] "RemoveContainer" containerID="e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.362522 4881 scope.go:117] "RemoveContainer" containerID="fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4" Dec 11 08:37:59 crc kubenswrapper[4881]: E1211 08:37:59.362959 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4\": container with ID starting with fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4 not found: ID does not exist" containerID="fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.363012 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4"} err="failed to get container status \"fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4\": rpc error: code = NotFound desc = could not find container \"fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4\": container with ID starting with fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4 not found: ID does not exist" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.365544 4881 scope.go:117] "RemoveContainer" containerID="e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef" Dec 11 08:37:59 crc kubenswrapper[4881]: E1211 08:37:59.366201 4881 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef\": container with ID starting with e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef not found: ID does not exist" containerID="e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.366280 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef"} err="failed to get container status \"e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef\": rpc error: code = NotFound desc = could not find container \"e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef\": container with ID starting with e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef not found: ID does not exist" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.366346 4881 scope.go:117] "RemoveContainer" containerID="fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.367809 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4"} err="failed to get container status \"fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4\": rpc error: code = NotFound desc = could not find container \"fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4\": container with ID starting with fffc15ad744dd26122404fd7132bd7ea47b15fc8cb648cc197dc66b920f982e4 not found: ID does not exist" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.367849 4881 scope.go:117] "RemoveContainer" containerID="e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.368579 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef"} err="failed to get container status \"e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef\": rpc error: code = NotFound desc = could not find container \"e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef\": container with ID starting with e2dcc92765468e5223506afbad1c5b420610a2bdf9bcafc4501419b80cccc1ef not found: ID does not exist" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.456844 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.483378 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.503636 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 08:37:59 crc kubenswrapper[4881]: E1211 08:37:59.504412 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99628143-5f10-4ebc-beea-4e33458d3eb3" containerName="glance-httpd" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.504497 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="99628143-5f10-4ebc-beea-4e33458d3eb3" containerName="glance-httpd" Dec 11 08:37:59 crc kubenswrapper[4881]: E1211 08:37:59.504582 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99628143-5f10-4ebc-beea-4e33458d3eb3" 
containerName="glance-log" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.504640 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="99628143-5f10-4ebc-beea-4e33458d3eb3" containerName="glance-log" Dec 11 08:37:59 crc kubenswrapper[4881]: E1211 08:37:59.504705 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fe972c7-ac62-43a9-87b0-17fb27c4c2c9" containerName="init" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.504763 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fe972c7-ac62-43a9-87b0-17fb27c4c2c9" containerName="init" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.505037 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fe972c7-ac62-43a9-87b0-17fb27c4c2c9" containerName="init" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.505133 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="99628143-5f10-4ebc-beea-4e33458d3eb3" containerName="glance-httpd" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.505219 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="99628143-5f10-4ebc-beea-4e33458d3eb3" containerName="glance-log" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.506510 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.508684 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.508751 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.509086 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.627515 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-config-data\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.627617 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.627652 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/96a878c1-c8dc-443b-a6a2-e9e2b3833213-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.627668 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/96a878c1-c8dc-443b-a6a2-e9e2b3833213-logs\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.627702 4881 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwtqx\" (UniqueName: \"kubernetes.io/projected/96a878c1-c8dc-443b-a6a2-e9e2b3833213-kube-api-access-nwtqx\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.627746 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.627791 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.627811 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-scripts\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.729880 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.729963 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-scripts\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.730026 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-config-data\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.731669 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.731714 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/96a878c1-c8dc-443b-a6a2-e9e2b3833213-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.731742 4881 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/96a878c1-c8dc-443b-a6a2-e9e2b3833213-logs\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.731792 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwtqx\" (UniqueName: \"kubernetes.io/projected/96a878c1-c8dc-443b-a6a2-e9e2b3833213-kube-api-access-nwtqx\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.731862 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.732251 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.733580 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/96a878c1-c8dc-443b-a6a2-e9e2b3833213-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.733861 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/96a878c1-c8dc-443b-a6a2-e9e2b3833213-logs\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.737690 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-scripts\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.738140 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.738945 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-config-data\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.748655 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.753214 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwtqx\" (UniqueName: \"kubernetes.io/projected/96a878c1-c8dc-443b-a6a2-e9e2b3833213-kube-api-access-nwtqx\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.774053 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " pod="openstack/glance-default-external-api-0" Dec 11 08:37:59 crc kubenswrapper[4881]: I1211 08:37:59.829652 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 11 08:38:00 crc kubenswrapper[4881]: I1211 08:38:00.162729 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc" event={"ID":"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec","Type":"ContainerStarted","Data":"fdc44938fe9bfb591e747eaa778d4f2426cc45cd13e7248882fa0693dcbc920e"} Dec 11 08:38:00 crc kubenswrapper[4881]: I1211 08:38:00.170658 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"52860b37-6c04-42e4-ada9-94c9a46f1773","Type":"ContainerDied","Data":"bc26391021fb324f31517bbc60d528d2c90eb437b191ea229c4394e736cb60ba"} Dec 11 08:38:00 crc kubenswrapper[4881]: I1211 08:38:00.170571 4881 generic.go:334] "Generic (PLEG): container finished" podID="52860b37-6c04-42e4-ada9-94c9a46f1773" containerID="bc26391021fb324f31517bbc60d528d2c90eb437b191ea229c4394e736cb60ba" exitCode=143 Dec 11 08:38:00 crc kubenswrapper[4881]: I1211 08:38:00.455778 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 08:38:00 crc kubenswrapper[4881]: W1211 08:38:00.507730 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod96a878c1_c8dc_443b_a6a2_e9e2b3833213.slice/crio-a9e31d49fbe0f2ec3596339c2ec3a43ca591201bb128abf3fd18d46382f6de51 WatchSource:0}: Error finding container a9e31d49fbe0f2ec3596339c2ec3a43ca591201bb128abf3fd18d46382f6de51: Status 404 returned error can't find the container with id a9e31d49fbe0f2ec3596339c2ec3a43ca591201bb128abf3fd18d46382f6de51 Dec 11 08:38:01 crc kubenswrapper[4881]: I1211 08:38:01.025240 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99628143-5f10-4ebc-beea-4e33458d3eb3" path="/var/lib/kubelet/pods/99628143-5f10-4ebc-beea-4e33458d3eb3/volumes" Dec 11 08:38:01 crc kubenswrapper[4881]: I1211 08:38:01.198858 4881 generic.go:334] "Generic (PLEG): container finished" podID="52860b37-6c04-42e4-ada9-94c9a46f1773" containerID="1caf79a505830b54c1a9c58bcd9a0322448f88ad809e3ede5d5aefad2293f254" exitCode=0 Dec 11 08:38:01 crc kubenswrapper[4881]: I1211 08:38:01.198933 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"52860b37-6c04-42e4-ada9-94c9a46f1773","Type":"ContainerDied","Data":"1caf79a505830b54c1a9c58bcd9a0322448f88ad809e3ede5d5aefad2293f254"} Dec 11 08:38:01 crc kubenswrapper[4881]: I1211 08:38:01.200797 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"96a878c1-c8dc-443b-a6a2-e9e2b3833213","Type":"ContainerStarted","Data":"a9e31d49fbe0f2ec3596339c2ec3a43ca591201bb128abf3fd18d46382f6de51"} Dec 11 08:38:01 crc kubenswrapper[4881]: I1211 08:38:01.200978 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc" Dec 11 08:38:01 crc kubenswrapper[4881]: I1211 08:38:01.220074 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc" podStartSLOduration=7.220051074 podStartE2EDuration="7.220051074s" podCreationTimestamp="2025-12-11 08:37:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:38:01.218911134 +0000 UTC m=+1329.596279841" watchObservedRunningTime="2025-12-11 08:38:01.220051074 +0000 UTC m=+1329.597419771" Dec 11 08:38:02 crc kubenswrapper[4881]: I1211 08:38:02.211148 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6986b4b8b9-dlx84" event={"ID":"8edd456d-09d4-46fc-97ef-68c44cb5320c","Type":"ContainerStarted","Data":"57efbce6224f5ea0a6515ec47b8053a99e174c2137a265c8c8ee82c81ddcc39e"} Dec 11 08:38:02 crc kubenswrapper[4881]: I1211 08:38:02.391226 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Dec 11 08:38:02 crc kubenswrapper[4881]: I1211 08:38:02.485314 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:02 crc kubenswrapper[4881]: I1211 08:38:02.485776 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.238960 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"52860b37-6c04-42e4-ada9-94c9a46f1773","Type":"ContainerDied","Data":"8831cbe9ab9ccd27f2f61e0baec07684212052c2808d3a1b4116518cc5678c57"} Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.239258 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8831cbe9ab9ccd27f2f61e0baec07684212052c2808d3a1b4116518cc5678c57" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.319876 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.520131 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9xcv\" (UniqueName: \"kubernetes.io/projected/52860b37-6c04-42e4-ada9-94c9a46f1773-kube-api-access-m9xcv\") pod \"52860b37-6c04-42e4-ada9-94c9a46f1773\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.520201 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"52860b37-6c04-42e4-ada9-94c9a46f1773\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.520252 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-scripts\") pod \"52860b37-6c04-42e4-ada9-94c9a46f1773\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.520290 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/52860b37-6c04-42e4-ada9-94c9a46f1773-httpd-run\") pod \"52860b37-6c04-42e4-ada9-94c9a46f1773\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.520439 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-config-data\") pod \"52860b37-6c04-42e4-ada9-94c9a46f1773\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.520514 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-combined-ca-bundle\") pod \"52860b37-6c04-42e4-ada9-94c9a46f1773\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.520561 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52860b37-6c04-42e4-ada9-94c9a46f1773-logs\") pod \"52860b37-6c04-42e4-ada9-94c9a46f1773\" (UID: \"52860b37-6c04-42e4-ada9-94c9a46f1773\") " Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.522203 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52860b37-6c04-42e4-ada9-94c9a46f1773-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "52860b37-6c04-42e4-ada9-94c9a46f1773" (UID: "52860b37-6c04-42e4-ada9-94c9a46f1773"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.522291 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52860b37-6c04-42e4-ada9-94c9a46f1773-logs" (OuterVolumeSpecName: "logs") pod "52860b37-6c04-42e4-ada9-94c9a46f1773" (UID: "52860b37-6c04-42e4-ada9-94c9a46f1773"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.527198 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-scripts" (OuterVolumeSpecName: "scripts") pod "52860b37-6c04-42e4-ada9-94c9a46f1773" (UID: "52860b37-6c04-42e4-ada9-94c9a46f1773"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.528599 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "52860b37-6c04-42e4-ada9-94c9a46f1773" (UID: "52860b37-6c04-42e4-ada9-94c9a46f1773"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.541658 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52860b37-6c04-42e4-ada9-94c9a46f1773-kube-api-access-m9xcv" (OuterVolumeSpecName: "kube-api-access-m9xcv") pod "52860b37-6c04-42e4-ada9-94c9a46f1773" (UID: "52860b37-6c04-42e4-ada9-94c9a46f1773"). InnerVolumeSpecName "kube-api-access-m9xcv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.562490 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "52860b37-6c04-42e4-ada9-94c9a46f1773" (UID: "52860b37-6c04-42e4-ada9-94c9a46f1773"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.612622 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-config-data" (OuterVolumeSpecName: "config-data") pod "52860b37-6c04-42e4-ada9-94c9a46f1773" (UID: "52860b37-6c04-42e4-ada9-94c9a46f1773"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.623081 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9xcv\" (UniqueName: \"kubernetes.io/projected/52860b37-6c04-42e4-ada9-94c9a46f1773-kube-api-access-m9xcv\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.623170 4881 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.623185 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.623197 4881 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/52860b37-6c04-42e4-ada9-94c9a46f1773-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.623209 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.623220 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52860b37-6c04-42e4-ada9-94c9a46f1773-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.623231 4881 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52860b37-6c04-42e4-ada9-94c9a46f1773-logs\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.658386 4881 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.725257 4881 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.761170 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc" Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.841554 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-628mp"] Dec 11 08:38:04 crc kubenswrapper[4881]: I1211 08:38:04.842910 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-76fcf4b695-628mp" podUID="4414ed0e-d99f-4f9b-8888-e73cf1d47834" containerName="dnsmasq-dns" containerID="cri-o://0235f7d2b1778dfb438c3a15c2da77b5e7352d840bed0db998a72cb71adb59ac" gracePeriod=10 Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.253502 4881 generic.go:334] "Generic (PLEG): container finished" podID="422faa6a-f2ed-4015-87cd-7878bac246e4" containerID="0bafc15cd1a4a159ece5b6186664af0b35f78a1f3442fe0b6442a32e1917de5f" exitCode=0 Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.253593 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-vmjdf" 
event={"ID":"422faa6a-f2ed-4015-87cd-7878bac246e4","Type":"ContainerDied","Data":"0bafc15cd1a4a159ece5b6186664af0b35f78a1f3442fe0b6442a32e1917de5f"} Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.255991 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"96a878c1-c8dc-443b-a6a2-e9e2b3833213","Type":"ContainerStarted","Data":"4468e46bc7e99e4bd0e2b6b2c10187478a1315d51fac1b2b9e5dec3b360d5c8f"} Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.261190 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0074c6f2-5d03-406d-a8a3-19f87e5980d8","Type":"ContainerStarted","Data":"fc0e0f13955e8f26f942126958994aa607d18e71140c9af36d62770bcb6b8588"} Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.264988 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6986b4b8b9-dlx84" event={"ID":"8edd456d-09d4-46fc-97ef-68c44cb5320c","Type":"ContainerStarted","Data":"81deb2fc39277e928ed1d1846a94eb979a89f92d8c08c61f438b9b04650bd70d"} Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.266045 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6986b4b8b9-dlx84" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.277410 4881 generic.go:334] "Generic (PLEG): container finished" podID="4414ed0e-d99f-4f9b-8888-e73cf1d47834" containerID="0235f7d2b1778dfb438c3a15c2da77b5e7352d840bed0db998a72cb71adb59ac" exitCode=0 Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.277532 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.277617 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-628mp" event={"ID":"4414ed0e-d99f-4f9b-8888-e73cf1d47834","Type":"ContainerDied","Data":"0235f7d2b1778dfb438c3a15c2da77b5e7352d840bed0db998a72cb71adb59ac"} Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.322945 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6986b4b8b9-dlx84" podStartSLOduration=8.322692108 podStartE2EDuration="8.322692108s" podCreationTimestamp="2025-12-11 08:37:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:38:05.298662024 +0000 UTC m=+1333.676030721" watchObservedRunningTime="2025-12-11 08:38:05.322692108 +0000 UTC m=+1333.700060815" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.387189 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.408393 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.431052 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 08:38:05 crc kubenswrapper[4881]: E1211 08:38:05.431581 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52860b37-6c04-42e4-ada9-94c9a46f1773" containerName="glance-log" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.431599 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="52860b37-6c04-42e4-ada9-94c9a46f1773" containerName="glance-log" Dec 11 08:38:05 crc kubenswrapper[4881]: E1211 08:38:05.431625 4881 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="52860b37-6c04-42e4-ada9-94c9a46f1773" containerName="glance-httpd" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.431632 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="52860b37-6c04-42e4-ada9-94c9a46f1773" containerName="glance-httpd" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.431847 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="52860b37-6c04-42e4-ada9-94c9a46f1773" containerName="glance-httpd" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.431874 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="52860b37-6c04-42e4-ada9-94c9a46f1773" containerName="glance-log" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.433019 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.435268 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.435849 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.452533 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.495071 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.548052 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.548163 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.548191 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fb741205-ce03-4c1d-9181-b2efc3c92319-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.548232 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb741205-ce03-4c1d-9181-b2efc3c92319-logs\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.548276 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " 
pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.548297 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.548323 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.548386 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fbd9\" (UniqueName: \"kubernetes.io/projected/fb741205-ce03-4c1d-9181-b2efc3c92319-kube-api-access-9fbd9\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.649710 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-config\") pod \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.649864 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-ovsdbserver-nb\") pod \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.649946 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-ovsdbserver-sb\") pod \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.649968 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-dns-svc\") pod \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.649992 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-dns-swift-storage-0\") pod \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.650034 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9lz\" (UniqueName: \"kubernetes.io/projected/4414ed0e-d99f-4f9b-8888-e73cf1d47834-kube-api-access-lz9lz\") pod \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\" (UID: \"4414ed0e-d99f-4f9b-8888-e73cf1d47834\") " Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.650429 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fb741205-ce03-4c1d-9181-b2efc3c92319-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.650498 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb741205-ce03-4c1d-9181-b2efc3c92319-logs\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.650558 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.650600 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.650640 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.650700 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fbd9\" (UniqueName: \"kubernetes.io/projected/fb741205-ce03-4c1d-9181-b2efc3c92319-kube-api-access-9fbd9\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.650827 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.650903 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.651993 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb741205-ce03-4c1d-9181-b2efc3c92319-logs\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.652042 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod 
\"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.652132 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fb741205-ce03-4c1d-9181-b2efc3c92319-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.657190 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.660177 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.670573 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4414ed0e-d99f-4f9b-8888-e73cf1d47834-kube-api-access-lz9lz" (OuterVolumeSpecName: "kube-api-access-lz9lz") pod "4414ed0e-d99f-4f9b-8888-e73cf1d47834" (UID: "4414ed0e-d99f-4f9b-8888-e73cf1d47834"). InnerVolumeSpecName "kube-api-access-lz9lz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.671070 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.673739 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.677051 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fbd9\" (UniqueName: \"kubernetes.io/projected/fb741205-ce03-4c1d-9181-b2efc3c92319-kube-api-access-9fbd9\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.718398 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4414ed0e-d99f-4f9b-8888-e73cf1d47834" (UID: "4414ed0e-d99f-4f9b-8888-e73cf1d47834"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.720012 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.728808 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4414ed0e-d99f-4f9b-8888-e73cf1d47834" (UID: "4414ed0e-d99f-4f9b-8888-e73cf1d47834"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.729267 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-config" (OuterVolumeSpecName: "config") pod "4414ed0e-d99f-4f9b-8888-e73cf1d47834" (UID: "4414ed0e-d99f-4f9b-8888-e73cf1d47834"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.740048 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4414ed0e-d99f-4f9b-8888-e73cf1d47834" (UID: "4414ed0e-d99f-4f9b-8888-e73cf1d47834"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.752897 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.752942 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.752957 4881 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.752969 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9lz\" (UniqueName: \"kubernetes.io/projected/4414ed0e-d99f-4f9b-8888-e73cf1d47834-kube-api-access-lz9lz\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.752983 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.764678 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "4414ed0e-d99f-4f9b-8888-e73cf1d47834" (UID: "4414ed0e-d99f-4f9b-8888-e73cf1d47834"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.807601 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:05 crc kubenswrapper[4881]: I1211 08:38:05.854577 4881 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4414ed0e-d99f-4f9b-8888-e73cf1d47834-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.293169 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-628mp" event={"ID":"4414ed0e-d99f-4f9b-8888-e73cf1d47834","Type":"ContainerDied","Data":"fa2a4b103870d6c0d0cfc1f5c6936516d910176698a6ac649a93235deaeace76"} Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.293210 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-628mp" Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.293228 4881 scope.go:117] "RemoveContainer" containerID="0235f7d2b1778dfb438c3a15c2da77b5e7352d840bed0db998a72cb71adb59ac" Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.295029 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"96a878c1-c8dc-443b-a6a2-e9e2b3833213","Type":"ContainerStarted","Data":"9f2b61792cd2e899dae69b7bf99ab2a846b2cf8cd5c7bc92f5683c5e3af317f1"} Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.347972 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.347949454 podStartE2EDuration="7.347949454s" podCreationTimestamp="2025-12-11 08:37:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:38:06.325147371 +0000 UTC m=+1334.702516068" watchObservedRunningTime="2025-12-11 08:38:06.347949454 +0000 UTC m=+1334.725318151" Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.366897 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-628mp"] Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.373164 4881 scope.go:117] "RemoveContainer" containerID="4b0cc2df4e12ea55c5671984d70098bf238ee2595e52235b5ba6c4f67ec1cbbc" Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.384846 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-628mp"] Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.470026 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 08:38:06 crc kubenswrapper[4881]: W1211 08:38:06.484786 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb741205_ce03_4c1d_9181_b2efc3c92319.slice/crio-fe9b40206aeabc8792f161749c582ed8e2de3507772f4fd9e5b141bfae06ac29 WatchSource:0}: Error finding container fe9b40206aeabc8792f161749c582ed8e2de3507772f4fd9e5b141bfae06ac29: Status 404 returned error can't find the container with id fe9b40206aeabc8792f161749c582ed8e2de3507772f4fd9e5b141bfae06ac29 Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.735626 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-vmjdf" Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.879218 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/422faa6a-f2ed-4015-87cd-7878bac246e4-db-sync-config-data\") pod \"422faa6a-f2ed-4015-87cd-7878bac246e4\" (UID: \"422faa6a-f2ed-4015-87cd-7878bac246e4\") " Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.880034 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/422faa6a-f2ed-4015-87cd-7878bac246e4-combined-ca-bundle\") pod \"422faa6a-f2ed-4015-87cd-7878bac246e4\" (UID: \"422faa6a-f2ed-4015-87cd-7878bac246e4\") " Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.880605 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dpbqz\" (UniqueName: \"kubernetes.io/projected/422faa6a-f2ed-4015-87cd-7878bac246e4-kube-api-access-dpbqz\") pod \"422faa6a-f2ed-4015-87cd-7878bac246e4\" (UID: \"422faa6a-f2ed-4015-87cd-7878bac246e4\") " Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.886417 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/422faa6a-f2ed-4015-87cd-7878bac246e4-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "422faa6a-f2ed-4015-87cd-7878bac246e4" (UID: "422faa6a-f2ed-4015-87cd-7878bac246e4"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.886613 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/422faa6a-f2ed-4015-87cd-7878bac246e4-kube-api-access-dpbqz" (OuterVolumeSpecName: "kube-api-access-dpbqz") pod "422faa6a-f2ed-4015-87cd-7878bac246e4" (UID: "422faa6a-f2ed-4015-87cd-7878bac246e4"). InnerVolumeSpecName "kube-api-access-dpbqz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.928905 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/422faa6a-f2ed-4015-87cd-7878bac246e4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "422faa6a-f2ed-4015-87cd-7878bac246e4" (UID: "422faa6a-f2ed-4015-87cd-7878bac246e4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.983853 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dpbqz\" (UniqueName: \"kubernetes.io/projected/422faa6a-f2ed-4015-87cd-7878bac246e4-kube-api-access-dpbqz\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.983902 4881 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/422faa6a-f2ed-4015-87cd-7878bac246e4-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:06 crc kubenswrapper[4881]: I1211 08:38:06.983918 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/422faa6a-f2ed-4015-87cd-7878bac246e4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.021850 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4414ed0e-d99f-4f9b-8888-e73cf1d47834" path="/var/lib/kubelet/pods/4414ed0e-d99f-4f9b-8888-e73cf1d47834/volumes" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.022898 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52860b37-6c04-42e4-ada9-94c9a46f1773" path="/var/lib/kubelet/pods/52860b37-6c04-42e4-ada9-94c9a46f1773/volumes" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.352202 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fb741205-ce03-4c1d-9181-b2efc3c92319","Type":"ContainerStarted","Data":"c4006b0e738d45d323e12ce6001a2110fff8d26a20bbcb970da9a399dd8e3e62"} Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.352266 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fb741205-ce03-4c1d-9181-b2efc3c92319","Type":"ContainerStarted","Data":"fe9b40206aeabc8792f161749c582ed8e2de3507772f4fd9e5b141bfae06ac29"} Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.360134 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-vmjdf" event={"ID":"422faa6a-f2ed-4015-87cd-7878bac246e4","Type":"ContainerDied","Data":"4dff55711a9ea928a36047afb2d3ccf0107f427a994b77b9be3b15a05784469e"} Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.360202 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4dff55711a9ea928a36047afb2d3ccf0107f427a994b77b9be3b15a05784469e" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.360538 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-vmjdf" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.485395 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-9778dfbb5-mls2j"] Dec 11 08:38:07 crc kubenswrapper[4881]: E1211 08:38:07.486472 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="422faa6a-f2ed-4015-87cd-7878bac246e4" containerName="barbican-db-sync" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.486497 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="422faa6a-f2ed-4015-87cd-7878bac246e4" containerName="barbican-db-sync" Dec 11 08:38:07 crc kubenswrapper[4881]: E1211 08:38:07.486548 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4414ed0e-d99f-4f9b-8888-e73cf1d47834" containerName="dnsmasq-dns" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.486557 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="4414ed0e-d99f-4f9b-8888-e73cf1d47834" containerName="dnsmasq-dns" Dec 11 08:38:07 crc kubenswrapper[4881]: E1211 08:38:07.486570 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4414ed0e-d99f-4f9b-8888-e73cf1d47834" containerName="init" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.486577 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="4414ed0e-d99f-4f9b-8888-e73cf1d47834" containerName="init" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.486979 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="4414ed0e-d99f-4f9b-8888-e73cf1d47834" containerName="dnsmasq-dns" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.487055 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="422faa6a-f2ed-4015-87cd-7878bac246e4" containerName="barbican-db-sync" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.488753 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-9778dfbb5-mls2j" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.493986 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.494437 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.499864 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-ggs6l" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.516298 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-9778dfbb5-mls2j"] Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.579217 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-5766655fb4-qmcpd"] Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.582810 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.588146 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.603987 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fec4cf8-f794-4f69-9645-38b0dd1ef593-combined-ca-bundle\") pod \"barbican-worker-9778dfbb5-mls2j\" (UID: \"0fec4cf8-f794-4f69-9645-38b0dd1ef593\") " pod="openstack/barbican-worker-9778dfbb5-mls2j" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.604039 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fszp5\" (UniqueName: \"kubernetes.io/projected/0fec4cf8-f794-4f69-9645-38b0dd1ef593-kube-api-access-fszp5\") pod \"barbican-worker-9778dfbb5-mls2j\" (UID: \"0fec4cf8-f794-4f69-9645-38b0dd1ef593\") " pod="openstack/barbican-worker-9778dfbb5-mls2j" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.604141 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fec4cf8-f794-4f69-9645-38b0dd1ef593-config-data\") pod \"barbican-worker-9778dfbb5-mls2j\" (UID: \"0fec4cf8-f794-4f69-9645-38b0dd1ef593\") " pod="openstack/barbican-worker-9778dfbb5-mls2j" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.604182 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fec4cf8-f794-4f69-9645-38b0dd1ef593-logs\") pod \"barbican-worker-9778dfbb5-mls2j\" (UID: \"0fec4cf8-f794-4f69-9645-38b0dd1ef593\") " pod="openstack/barbican-worker-9778dfbb5-mls2j" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.604402 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fec4cf8-f794-4f69-9645-38b0dd1ef593-config-data-custom\") pod \"barbican-worker-9778dfbb5-mls2j\" (UID: \"0fec4cf8-f794-4f69-9645-38b0dd1ef593\") " pod="openstack/barbican-worker-9778dfbb5-mls2j" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.611816 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-js2tw"] Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.613754 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.632655 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-js2tw"] Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.652659 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5766655fb4-qmcpd"] Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.705865 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b3f6375d-3379-4a1a-b875-286687315947-config-data-custom\") pod \"barbican-keystone-listener-5766655fb4-qmcpd\" (UID: \"b3f6375d-3379-4a1a-b875-286687315947\") " pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.705914 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.705960 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.706007 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3f6375d-3379-4a1a-b875-286687315947-config-data\") pod \"barbican-keystone-listener-5766655fb4-qmcpd\" (UID: \"b3f6375d-3379-4a1a-b875-286687315947\") " pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.706034 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fec4cf8-f794-4f69-9645-38b0dd1ef593-config-data-custom\") pod \"barbican-worker-9778dfbb5-mls2j\" (UID: \"0fec4cf8-f794-4f69-9645-38b0dd1ef593\") " pod="openstack/barbican-worker-9778dfbb5-mls2j" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.706051 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.706091 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-config\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.706111 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5dpk\" (UniqueName: 
\"kubernetes.io/projected/edab6e89-cb03-44e6-b511-d64ba764b857-kube-api-access-f5dpk\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.706128 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fec4cf8-f794-4f69-9645-38b0dd1ef593-combined-ca-bundle\") pod \"barbican-worker-9778dfbb5-mls2j\" (UID: \"0fec4cf8-f794-4f69-9645-38b0dd1ef593\") " pod="openstack/barbican-worker-9778dfbb5-mls2j" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.706144 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.706162 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fszp5\" (UniqueName: \"kubernetes.io/projected/0fec4cf8-f794-4f69-9645-38b0dd1ef593-kube-api-access-fszp5\") pod \"barbican-worker-9778dfbb5-mls2j\" (UID: \"0fec4cf8-f794-4f69-9645-38b0dd1ef593\") " pod="openstack/barbican-worker-9778dfbb5-mls2j" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.706195 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3f6375d-3379-4a1a-b875-286687315947-combined-ca-bundle\") pod \"barbican-keystone-listener-5766655fb4-qmcpd\" (UID: \"b3f6375d-3379-4a1a-b875-286687315947\") " pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.706223 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gjf5\" (UniqueName: \"kubernetes.io/projected/b3f6375d-3379-4a1a-b875-286687315947-kube-api-access-4gjf5\") pod \"barbican-keystone-listener-5766655fb4-qmcpd\" (UID: \"b3f6375d-3379-4a1a-b875-286687315947\") " pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.706257 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fec4cf8-f794-4f69-9645-38b0dd1ef593-config-data\") pod \"barbican-worker-9778dfbb5-mls2j\" (UID: \"0fec4cf8-f794-4f69-9645-38b0dd1ef593\") " pod="openstack/barbican-worker-9778dfbb5-mls2j" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.706277 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3f6375d-3379-4a1a-b875-286687315947-logs\") pod \"barbican-keystone-listener-5766655fb4-qmcpd\" (UID: \"b3f6375d-3379-4a1a-b875-286687315947\") " pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.706298 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fec4cf8-f794-4f69-9645-38b0dd1ef593-logs\") pod \"barbican-worker-9778dfbb5-mls2j\" (UID: \"0fec4cf8-f794-4f69-9645-38b0dd1ef593\") " pod="openstack/barbican-worker-9778dfbb5-mls2j" Dec 11 08:38:07 crc kubenswrapper[4881]: 
I1211 08:38:07.706680 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fec4cf8-f794-4f69-9645-38b0dd1ef593-logs\") pod \"barbican-worker-9778dfbb5-mls2j\" (UID: \"0fec4cf8-f794-4f69-9645-38b0dd1ef593\") " pod="openstack/barbican-worker-9778dfbb5-mls2j" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.714316 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fec4cf8-f794-4f69-9645-38b0dd1ef593-config-data-custom\") pod \"barbican-worker-9778dfbb5-mls2j\" (UID: \"0fec4cf8-f794-4f69-9645-38b0dd1ef593\") " pod="openstack/barbican-worker-9778dfbb5-mls2j" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.724700 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fec4cf8-f794-4f69-9645-38b0dd1ef593-combined-ca-bundle\") pod \"barbican-worker-9778dfbb5-mls2j\" (UID: \"0fec4cf8-f794-4f69-9645-38b0dd1ef593\") " pod="openstack/barbican-worker-9778dfbb5-mls2j" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.724783 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-78cc5875b6-mr4tb"] Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.726730 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.728396 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fec4cf8-f794-4f69-9645-38b0dd1ef593-config-data\") pod \"barbican-worker-9778dfbb5-mls2j\" (UID: \"0fec4cf8-f794-4f69-9645-38b0dd1ef593\") " pod="openstack/barbican-worker-9778dfbb5-mls2j" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.730564 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.738106 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-78cc5875b6-mr4tb"] Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.744041 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fszp5\" (UniqueName: \"kubernetes.io/projected/0fec4cf8-f794-4f69-9645-38b0dd1ef593-kube-api-access-fszp5\") pod \"barbican-worker-9778dfbb5-mls2j\" (UID: \"0fec4cf8-f794-4f69-9645-38b0dd1ef593\") " pod="openstack/barbican-worker-9778dfbb5-mls2j" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.808576 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.808675 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-combined-ca-bundle\") pod \"barbican-api-78cc5875b6-mr4tb\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.808721 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/b3f6375d-3379-4a1a-b875-286687315947-config-data\") pod \"barbican-keystone-listener-5766655fb4-qmcpd\" (UID: \"b3f6375d-3379-4a1a-b875-286687315947\") " pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.808751 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.808813 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-config\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.808846 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5dpk\" (UniqueName: \"kubernetes.io/projected/edab6e89-cb03-44e6-b511-d64ba764b857-kube-api-access-f5dpk\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.808869 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.808922 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3f6375d-3379-4a1a-b875-286687315947-combined-ca-bundle\") pod \"barbican-keystone-listener-5766655fb4-qmcpd\" (UID: \"b3f6375d-3379-4a1a-b875-286687315947\") " pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.808969 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gjf5\" (UniqueName: \"kubernetes.io/projected/b3f6375d-3379-4a1a-b875-286687315947-kube-api-access-4gjf5\") pod \"barbican-keystone-listener-5766655fb4-qmcpd\" (UID: \"b3f6375d-3379-4a1a-b875-286687315947\") " pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.809012 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-logs\") pod \"barbican-api-78cc5875b6-mr4tb\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.809052 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3f6375d-3379-4a1a-b875-286687315947-logs\") pod \"barbican-keystone-listener-5766655fb4-qmcpd\" (UID: \"b3f6375d-3379-4a1a-b875-286687315947\") " pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.809105 4881 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-config-data\") pod \"barbican-api-78cc5875b6-mr4tb\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.809135 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpqx6\" (UniqueName: \"kubernetes.io/projected/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-kube-api-access-mpqx6\") pod \"barbican-api-78cc5875b6-mr4tb\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.809187 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-config-data-custom\") pod \"barbican-api-78cc5875b6-mr4tb\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.809248 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b3f6375d-3379-4a1a-b875-286687315947-config-data-custom\") pod \"barbican-keystone-listener-5766655fb4-qmcpd\" (UID: \"b3f6375d-3379-4a1a-b875-286687315947\") " pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.809283 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.810376 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.811010 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3f6375d-3379-4a1a-b875-286687315947-logs\") pod \"barbican-keystone-listener-5766655fb4-qmcpd\" (UID: \"b3f6375d-3379-4a1a-b875-286687315947\") " pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.811297 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-config\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.811500 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.811856 4881 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.812015 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.816052 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3f6375d-3379-4a1a-b875-286687315947-combined-ca-bundle\") pod \"barbican-keystone-listener-5766655fb4-qmcpd\" (UID: \"b3f6375d-3379-4a1a-b875-286687315947\") " pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.818076 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b3f6375d-3379-4a1a-b875-286687315947-config-data-custom\") pod \"barbican-keystone-listener-5766655fb4-qmcpd\" (UID: \"b3f6375d-3379-4a1a-b875-286687315947\") " pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.820669 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3f6375d-3379-4a1a-b875-286687315947-config-data\") pod \"barbican-keystone-listener-5766655fb4-qmcpd\" (UID: \"b3f6375d-3379-4a1a-b875-286687315947\") " pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.826151 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gjf5\" (UniqueName: \"kubernetes.io/projected/b3f6375d-3379-4a1a-b875-286687315947-kube-api-access-4gjf5\") pod \"barbican-keystone-listener-5766655fb4-qmcpd\" (UID: \"b3f6375d-3379-4a1a-b875-286687315947\") " pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.830944 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-9778dfbb5-mls2j" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.833091 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5dpk\" (UniqueName: \"kubernetes.io/projected/edab6e89-cb03-44e6-b511-d64ba764b857-kube-api-access-f5dpk\") pod \"dnsmasq-dns-75c8ddd69c-js2tw\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.914594 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-combined-ca-bundle\") pod \"barbican-api-78cc5875b6-mr4tb\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.915210 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-logs\") pod \"barbican-api-78cc5875b6-mr4tb\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.915353 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpqx6\" (UniqueName: \"kubernetes.io/projected/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-kube-api-access-mpqx6\") pod \"barbican-api-78cc5875b6-mr4tb\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.915384 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-config-data\") pod \"barbican-api-78cc5875b6-mr4tb\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.915431 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-config-data-custom\") pod \"barbican-api-78cc5875b6-mr4tb\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.915732 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-logs\") pod \"barbican-api-78cc5875b6-mr4tb\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.919217 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-config-data\") pod \"barbican-api-78cc5875b6-mr4tb\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.919665 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-config-data-custom\") pod \"barbican-api-78cc5875b6-mr4tb\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 
08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.935989 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-combined-ca-bundle\") pod \"barbican-api-78cc5875b6-mr4tb\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.938492 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpqx6\" (UniqueName: \"kubernetes.io/projected/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-kube-api-access-mpqx6\") pod \"barbican-api-78cc5875b6-mr4tb\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.946890 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" Dec 11 08:38:07 crc kubenswrapper[4881]: I1211 08:38:07.962219 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:08 crc kubenswrapper[4881]: I1211 08:38:08.098203 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:08 crc kubenswrapper[4881]: I1211 08:38:08.394046 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fb741205-ce03-4c1d-9181-b2efc3c92319","Type":"ContainerStarted","Data":"e63123dac3e0c7900898c6e9b6eb12295de5e928c5abc16df766d433bbaf73cb"} Dec 11 08:38:08 crc kubenswrapper[4881]: I1211 08:38:08.443086 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.4430592349999998 podStartE2EDuration="3.443059235s" podCreationTimestamp="2025-12-11 08:38:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:38:08.429944604 +0000 UTC m=+1336.807313301" watchObservedRunningTime="2025-12-11 08:38:08.443059235 +0000 UTC m=+1336.820427942" Dec 11 08:38:08 crc kubenswrapper[4881]: I1211 08:38:08.706163 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-9778dfbb5-mls2j"] Dec 11 08:38:08 crc kubenswrapper[4881]: I1211 08:38:08.835081 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5766655fb4-qmcpd"] Dec 11 08:38:08 crc kubenswrapper[4881]: I1211 08:38:08.848638 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-js2tw"] Dec 11 08:38:09 crc kubenswrapper[4881]: I1211 08:38:09.048894 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-78cc5875b6-mr4tb"] Dec 11 08:38:09 crc kubenswrapper[4881]: I1211 08:38:09.419621 4881 generic.go:334] "Generic (PLEG): container finished" podID="edab6e89-cb03-44e6-b511-d64ba764b857" containerID="eba0ce956a1aac2cf1a8ee1e04ac0ea51d7e0612e1999ee09dbf2f2cb3531029" exitCode=0 Dec 11 08:38:09 crc kubenswrapper[4881]: I1211 08:38:09.419814 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" event={"ID":"edab6e89-cb03-44e6-b511-d64ba764b857","Type":"ContainerDied","Data":"eba0ce956a1aac2cf1a8ee1e04ac0ea51d7e0612e1999ee09dbf2f2cb3531029"} Dec 11 08:38:09 crc kubenswrapper[4881]: I1211 08:38:09.420014 4881 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" event={"ID":"edab6e89-cb03-44e6-b511-d64ba764b857","Type":"ContainerStarted","Data":"141f11d310bb26aad6c2c23dbddcbdf3591e6f650607cdee70bf1a32088569b3"} Dec 11 08:38:09 crc kubenswrapper[4881]: I1211 08:38:09.430694 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-78cc5875b6-mr4tb" event={"ID":"47f7a1b2-62e4-4963-b09c-bed44acc3c4a","Type":"ContainerStarted","Data":"bd02cc3a17b6d3398858fbb78249a674d2e353dbbe9ff0da2cfbb1a56a43636a"} Dec 11 08:38:09 crc kubenswrapper[4881]: I1211 08:38:09.432603 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-9778dfbb5-mls2j" event={"ID":"0fec4cf8-f794-4f69-9645-38b0dd1ef593","Type":"ContainerStarted","Data":"c099acedab811b3bd312c97fe6b6b65345ba2c69fad5eec85f2a22bd200aa2b5"} Dec 11 08:38:09 crc kubenswrapper[4881]: I1211 08:38:09.437159 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" event={"ID":"b3f6375d-3379-4a1a-b875-286687315947","Type":"ContainerStarted","Data":"6b85b1355120ea4ecdc84501509b37439da60e2314070df1307b4b847d00c80e"} Dec 11 08:38:09 crc kubenswrapper[4881]: I1211 08:38:09.833146 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 11 08:38:09 crc kubenswrapper[4881]: I1211 08:38:09.833470 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 11 08:38:09 crc kubenswrapper[4881]: I1211 08:38:09.892197 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 11 08:38:09 crc kubenswrapper[4881]: I1211 08:38:09.899349 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 11 08:38:10 crc kubenswrapper[4881]: I1211 08:38:10.455390 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-78cc5875b6-mr4tb" event={"ID":"47f7a1b2-62e4-4963-b09c-bed44acc3c4a","Type":"ContainerStarted","Data":"4619f9566660bb1526549a3b2acad0cfae04db286cf9f43cf27cce2b2aa931f7"} Dec 11 08:38:10 crc kubenswrapper[4881]: I1211 08:38:10.455727 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-78cc5875b6-mr4tb" event={"ID":"47f7a1b2-62e4-4963-b09c-bed44acc3c4a","Type":"ContainerStarted","Data":"6bc1a39d2a37c39394f31cd46fb1f773b6cedde4eec1c84f1f3c870e6caeb7d1"} Dec 11 08:38:10 crc kubenswrapper[4881]: I1211 08:38:10.461257 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:10 crc kubenswrapper[4881]: I1211 08:38:10.461310 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:10 crc kubenswrapper[4881]: I1211 08:38:10.467189 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-bcjrt" event={"ID":"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610","Type":"ContainerStarted","Data":"e93ae53a5a85e5ec74f3458cf16b09906e27fc5d031ed76c51db24d593d938a3"} Dec 11 08:38:10 crc kubenswrapper[4881]: I1211 08:38:10.472160 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-jvkz7" event={"ID":"2dde239c-3502-4b29-8f5d-1893f53819bd","Type":"ContainerStarted","Data":"d01bab017020799c07f038c20a04ac7e7800ea0cb58b7a1e1a1d0d58303d7d10"} Dec 11 
08:38:10 crc kubenswrapper[4881]: I1211 08:38:10.477782 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" event={"ID":"edab6e89-cb03-44e6-b511-d64ba764b857","Type":"ContainerStarted","Data":"f5af75019d1bfcfe57fda0fa3426711b5e30dbf674c78914eeb3c9a13b15e860"} Dec 11 08:38:10 crc kubenswrapper[4881]: I1211 08:38:10.478162 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 11 08:38:10 crc kubenswrapper[4881]: I1211 08:38:10.478274 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:10 crc kubenswrapper[4881]: I1211 08:38:10.478390 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 11 08:38:10 crc kubenswrapper[4881]: I1211 08:38:10.505182 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-78cc5875b6-mr4tb" podStartSLOduration=3.505164488 podStartE2EDuration="3.505164488s" podCreationTimestamp="2025-12-11 08:38:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:38:10.497897089 +0000 UTC m=+1338.875265786" watchObservedRunningTime="2025-12-11 08:38:10.505164488 +0000 UTC m=+1338.882533185" Dec 11 08:38:10 crc kubenswrapper[4881]: I1211 08:38:10.560128 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" podStartSLOduration=3.560106847 podStartE2EDuration="3.560106847s" podCreationTimestamp="2025-12-11 08:38:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:38:10.554747047 +0000 UTC m=+1338.932115744" watchObservedRunningTime="2025-12-11 08:38:10.560106847 +0000 UTC m=+1338.937475544" Dec 11 08:38:10 crc kubenswrapper[4881]: I1211 08:38:10.576485 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-bcjrt" podStartSLOduration=3.520100395 podStartE2EDuration="52.576462881s" podCreationTimestamp="2025-12-11 08:37:18 +0000 UTC" firstStartedPulling="2025-12-11 08:37:19.526301958 +0000 UTC m=+1287.903670655" lastFinishedPulling="2025-12-11 08:38:08.582664444 +0000 UTC m=+1336.960033141" observedRunningTime="2025-12-11 08:38:10.572877048 +0000 UTC m=+1338.950245745" watchObservedRunningTime="2025-12-11 08:38:10.576462881 +0000 UTC m=+1338.953831598" Dec 11 08:38:10 crc kubenswrapper[4881]: I1211 08:38:10.612521 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-jvkz7" podStartSLOduration=3.117061235 podStartE2EDuration="52.612503668s" podCreationTimestamp="2025-12-11 08:37:18 +0000 UTC" firstStartedPulling="2025-12-11 08:37:19.794150071 +0000 UTC m=+1288.171518768" lastFinishedPulling="2025-12-11 08:38:09.289592504 +0000 UTC m=+1337.666961201" observedRunningTime="2025-12-11 08:38:10.597112738 +0000 UTC m=+1338.974481425" watchObservedRunningTime="2025-12-11 08:38:10.612503668 +0000 UTC m=+1338.989872365" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.269029 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-67b8f6bb8b-gk4v6"] Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.274220 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.279831 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.280123 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.298552 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-67b8f6bb8b-gk4v6"] Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.346906 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24358cec-f24b-4eeb-ad37-069245596b56-combined-ca-bundle\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.347009 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24358cec-f24b-4eeb-ad37-069245596b56-config-data-custom\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.347053 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/24358cec-f24b-4eeb-ad37-069245596b56-internal-tls-certs\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.347467 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/24358cec-f24b-4eeb-ad37-069245596b56-public-tls-certs\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.347568 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24358cec-f24b-4eeb-ad37-069245596b56-config-data\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.347621 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24358cec-f24b-4eeb-ad37-069245596b56-logs\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.347694 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5h86q\" (UniqueName: \"kubernetes.io/projected/24358cec-f24b-4eeb-ad37-069245596b56-kube-api-access-5h86q\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.451260 4881 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24358cec-f24b-4eeb-ad37-069245596b56-combined-ca-bundle\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.451386 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24358cec-f24b-4eeb-ad37-069245596b56-config-data-custom\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.451423 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/24358cec-f24b-4eeb-ad37-069245596b56-internal-tls-certs\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.451630 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/24358cec-f24b-4eeb-ad37-069245596b56-public-tls-certs\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.451667 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24358cec-f24b-4eeb-ad37-069245596b56-config-data\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.451695 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24358cec-f24b-4eeb-ad37-069245596b56-logs\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.451744 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5h86q\" (UniqueName: \"kubernetes.io/projected/24358cec-f24b-4eeb-ad37-069245596b56-kube-api-access-5h86q\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.456051 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24358cec-f24b-4eeb-ad37-069245596b56-logs\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.464281 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24358cec-f24b-4eeb-ad37-069245596b56-config-data\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.465308 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/24358cec-f24b-4eeb-ad37-069245596b56-config-data-custom\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.471835 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/24358cec-f24b-4eeb-ad37-069245596b56-public-tls-certs\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.481321 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24358cec-f24b-4eeb-ad37-069245596b56-combined-ca-bundle\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.485901 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/24358cec-f24b-4eeb-ad37-069245596b56-internal-tls-certs\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.486435 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5h86q\" (UniqueName: \"kubernetes.io/projected/24358cec-f24b-4eeb-ad37-069245596b56-kube-api-access-5h86q\") pod \"barbican-api-67b8f6bb8b-gk4v6\" (UID: \"24358cec-f24b-4eeb-ad37-069245596b56\") " pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:11 crc kubenswrapper[4881]: I1211 08:38:11.621941 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:12 crc kubenswrapper[4881]: I1211 08:38:12.392179 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Dec 11 08:38:12 crc kubenswrapper[4881]: I1211 08:38:12.410412 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Dec 11 08:38:12 crc kubenswrapper[4881]: I1211 08:38:12.547307 4881 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 11 08:38:12 crc kubenswrapper[4881]: I1211 08:38:12.547354 4881 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 11 08:38:12 crc kubenswrapper[4881]: I1211 08:38:12.563785 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Dec 11 08:38:12 crc kubenswrapper[4881]: I1211 08:38:12.586015 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:38:12 crc kubenswrapper[4881]: I1211 08:38:12.605392 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-55fbb6c694-gw7p4" Dec 11 08:38:14 crc kubenswrapper[4881]: I1211 08:38:14.024395 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 11 08:38:14 crc kubenswrapper[4881]: I1211 08:38:14.024744 4881 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 11 08:38:14 crc kubenswrapper[4881]: I1211 08:38:14.272263 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 11 08:38:15 crc kubenswrapper[4881]: I1211 08:38:15.809611 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:15 crc kubenswrapper[4881]: I1211 08:38:15.809973 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:15 crc kubenswrapper[4881]: I1211 08:38:15.990905 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:15 crc kubenswrapper[4881]: I1211 08:38:15.994531 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:16 crc kubenswrapper[4881]: I1211 08:38:16.165028 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-78cc5875b6-mr4tb" podUID="47f7a1b2-62e4-4963-b09c-bed44acc3c4a" containerName="barbican-api-log" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 08:38:16 crc kubenswrapper[4881]: I1211 08:38:16.588107 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:16 crc kubenswrapper[4881]: I1211 08:38:16.588161 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:17 crc kubenswrapper[4881]: I1211 08:38:17.964438 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:18 crc kubenswrapper[4881]: I1211 08:38:18.038586 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-4kcvc"] Dec 11 08:38:18 crc 
kubenswrapper[4881]: I1211 08:38:18.043704 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc" podUID="d56a4394-101c-4fa1-ba3c-b9eb0907a5ec" containerName="dnsmasq-dns" containerID="cri-o://fdc44938fe9bfb591e747eaa778d4f2426cc45cd13e7248882fa0693dcbc920e" gracePeriod=10 Dec 11 08:38:19 crc kubenswrapper[4881]: I1211 08:38:19.080307 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:19 crc kubenswrapper[4881]: I1211 08:38:19.080455 4881 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 11 08:38:19 crc kubenswrapper[4881]: I1211 08:38:19.086930 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:19 crc kubenswrapper[4881]: I1211 08:38:19.730452 4881 generic.go:334] "Generic (PLEG): container finished" podID="d56a4394-101c-4fa1-ba3c-b9eb0907a5ec" containerID="fdc44938fe9bfb591e747eaa778d4f2426cc45cd13e7248882fa0693dcbc920e" exitCode=0 Dec 11 08:38:19 crc kubenswrapper[4881]: I1211 08:38:19.730563 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc" event={"ID":"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec","Type":"ContainerDied","Data":"fdc44938fe9bfb591e747eaa778d4f2426cc45cd13e7248882fa0693dcbc920e"} Dec 11 08:38:19 crc kubenswrapper[4881]: I1211 08:38:19.758682 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc" podUID="d56a4394-101c-4fa1-ba3c-b9eb0907a5ec" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.188:5353: connect: connection refused" Dec 11 08:38:20 crc kubenswrapper[4881]: I1211 08:38:20.082417 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-67b8f6bb8b-gk4v6"] Dec 11 08:38:20 crc kubenswrapper[4881]: I1211 08:38:20.933837 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:20 crc kubenswrapper[4881]: I1211 08:38:20.964645 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.084726 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc" Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.248265 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjnjs\" (UniqueName: \"kubernetes.io/projected/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-kube-api-access-pjnjs\") pod \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.248309 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-dns-svc\") pod \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.248367 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-config\") pod \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.248402 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-ovsdbserver-sb\") pod \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.248426 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-dns-swift-storage-0\") pod \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.248516 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-ovsdbserver-nb\") pod \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\" (UID: \"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec\") " Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.273530 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-kube-api-access-pjnjs" (OuterVolumeSpecName: "kube-api-access-pjnjs") pod "d56a4394-101c-4fa1-ba3c-b9eb0907a5ec" (UID: "d56a4394-101c-4fa1-ba3c-b9eb0907a5ec"). InnerVolumeSpecName "kube-api-access-pjnjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.370355 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjnjs\" (UniqueName: \"kubernetes.io/projected/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-kube-api-access-pjnjs\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.377597 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d56a4394-101c-4fa1-ba3c-b9eb0907a5ec" (UID: "d56a4394-101c-4fa1-ba3c-b9eb0907a5ec"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.384321 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d56a4394-101c-4fa1-ba3c-b9eb0907a5ec" (UID: "d56a4394-101c-4fa1-ba3c-b9eb0907a5ec"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.393596 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-config" (OuterVolumeSpecName: "config") pod "d56a4394-101c-4fa1-ba3c-b9eb0907a5ec" (UID: "d56a4394-101c-4fa1-ba3c-b9eb0907a5ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.401722 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d56a4394-101c-4fa1-ba3c-b9eb0907a5ec" (UID: "d56a4394-101c-4fa1-ba3c-b9eb0907a5ec"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.443789 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d56a4394-101c-4fa1-ba3c-b9eb0907a5ec" (UID: "d56a4394-101c-4fa1-ba3c-b9eb0907a5ec"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.472230 4881 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.472272 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.472285 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.472297 4881 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.472308 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.773383 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-9778dfbb5-mls2j" event={"ID":"0fec4cf8-f794-4f69-9645-38b0dd1ef593","Type":"ContainerStarted","Data":"5b073db779bc2237217fc9eefd171cdd448bc8d6f2339344a9f699ca139a237f"} Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.775545 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" event={"ID":"b3f6375d-3379-4a1a-b875-286687315947","Type":"ContainerStarted","Data":"93adacb828d1bce771deda2b07bbc78c3f0f0f6bc0ca396be720b51f1f16f671"} Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.777267 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-67b8f6bb8b-gk4v6" event={"ID":"24358cec-f24b-4eeb-ad37-069245596b56","Type":"ContainerStarted","Data":"82b815d47213d51abffef6a4fa341299b19f32fd1cfab1fa7e69ef341f27653b"} Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.779586 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc" event={"ID":"d56a4394-101c-4fa1-ba3c-b9eb0907a5ec","Type":"ContainerDied","Data":"69d0bb914bdc4ee02bb01c8f476bc1747e6b62a4a66cc42e8d4da98133bd85dd"} Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.779628 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-4kcvc" Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.779701 4881 scope.go:117] "RemoveContainer" containerID="fdc44938fe9bfb591e747eaa778d4f2426cc45cd13e7248882fa0693dcbc920e" Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.829395 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-4kcvc"] Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.839773 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-4kcvc"] Dec 11 08:38:21 crc kubenswrapper[4881]: I1211 08:38:21.849525 4881 scope.go:117] "RemoveContainer" containerID="66166530f41eca8f38ad0d100510c7d6652a0e107bdab21546d7e959198b045d" Dec 11 08:38:22 crc kubenswrapper[4881]: E1211 08:38:22.042583 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" Dec 11 08:38:22 crc kubenswrapper[4881]: I1211 08:38:22.793827 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-67b8f6bb8b-gk4v6" event={"ID":"24358cec-f24b-4eeb-ad37-069245596b56","Type":"ContainerStarted","Data":"f515e1f31962f4725bce4595f079d8e857334c0891654f6645e31c485633b7df"} Dec 11 08:38:22 crc kubenswrapper[4881]: I1211 08:38:22.793882 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-67b8f6bb8b-gk4v6" event={"ID":"24358cec-f24b-4eeb-ad37-069245596b56","Type":"ContainerStarted","Data":"9aa3e0f153f305001e5a007b3a336a0e7d56c035b6a03a8486aaeaee266d937e"} Dec 11 08:38:22 crc kubenswrapper[4881]: I1211 08:38:22.794772 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:22 crc kubenswrapper[4881]: I1211 08:38:22.794809 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-67b8f6bb8b-gk4v6" Dec 11 08:38:22 crc kubenswrapper[4881]: I1211 08:38:22.799548 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-9778dfbb5-mls2j" event={"ID":"0fec4cf8-f794-4f69-9645-38b0dd1ef593","Type":"ContainerStarted","Data":"6a7edf26533c8c33a5c9a2b806bd442eda37e9bcb635916e023db6da8a3df77e"} Dec 11 08:38:22 crc kubenswrapper[4881]: I1211 08:38:22.811794 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"0074c6f2-5d03-406d-a8a3-19f87e5980d8","Type":"ContainerStarted","Data":"5717d0de5676b96297cf2bd65ce4a4be8677cc71f4e9a1b7ec795e2d09a43453"} Dec 11 08:38:22 crc kubenswrapper[4881]: I1211 08:38:22.812159 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 11 08:38:22 crc kubenswrapper[4881]: I1211 08:38:22.815470 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" event={"ID":"b3f6375d-3379-4a1a-b875-286687315947","Type":"ContainerStarted","Data":"36970a374defe7d481ca9f2f7c877cf5d23d7e33c38289cd61963e7388730daf"} Dec 11 08:38:22 crc kubenswrapper[4881]: I1211 08:38:22.833811 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-67b8f6bb8b-gk4v6" podStartSLOduration=11.83379406 podStartE2EDuration="11.83379406s" podCreationTimestamp="2025-12-11 08:38:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:38:22.823077811 +0000 UTC m=+1351.200446518" watchObservedRunningTime="2025-12-11 08:38:22.83379406 +0000 UTC m=+1351.211162747" Dec 11 08:38:22 crc kubenswrapper[4881]: I1211 08:38:22.842924 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-9778dfbb5-mls2j" podStartSLOduration=4.9930424 podStartE2EDuration="15.842907817s" podCreationTimestamp="2025-12-11 08:38:07 +0000 UTC" firstStartedPulling="2025-12-11 08:38:08.719893432 +0000 UTC m=+1337.097262129" lastFinishedPulling="2025-12-11 08:38:19.569758849 +0000 UTC m=+1347.947127546" observedRunningTime="2025-12-11 08:38:22.840664449 +0000 UTC m=+1351.218033156" watchObservedRunningTime="2025-12-11 08:38:22.842907817 +0000 UTC m=+1351.220276514" Dec 11 08:38:22 crc kubenswrapper[4881]: I1211 08:38:22.862138 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-5766655fb4-qmcpd" podStartSLOduration=7.243008228 podStartE2EDuration="15.862121537s" podCreationTimestamp="2025-12-11 08:38:07 +0000 UTC" firstStartedPulling="2025-12-11 08:38:08.860624121 +0000 UTC m=+1337.237992818" lastFinishedPulling="2025-12-11 08:38:17.47973743 +0000 UTC m=+1345.857106127" observedRunningTime="2025-12-11 08:38:22.857187318 +0000 UTC m=+1351.234556015" watchObservedRunningTime="2025-12-11 08:38:22.862121537 +0000 UTC m=+1351.239490234" Dec 11 08:38:23 crc kubenswrapper[4881]: I1211 08:38:23.021805 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d56a4394-101c-4fa1-ba3c-b9eb0907a5ec" path="/var/lib/kubelet/pods/d56a4394-101c-4fa1-ba3c-b9eb0907a5ec/volumes" Dec 11 08:38:24 crc kubenswrapper[4881]: I1211 08:38:24.847648 4881 generic.go:334] "Generic (PLEG): container finished" podID="2dde239c-3502-4b29-8f5d-1893f53819bd" containerID="d01bab017020799c07f038c20a04ac7e7800ea0cb58b7a1e1a1d0d58303d7d10" exitCode=0 Dec 11 08:38:24 crc kubenswrapper[4881]: I1211 08:38:24.847744 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-jvkz7" event={"ID":"2dde239c-3502-4b29-8f5d-1893f53819bd","Type":"ContainerDied","Data":"d01bab017020799c07f038c20a04ac7e7800ea0cb58b7a1e1a1d0d58303d7d10"} Dec 11 08:38:24 crc kubenswrapper[4881]: I1211 08:38:24.856973 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"0074c6f2-5d03-406d-a8a3-19f87e5980d8","Type":"ContainerStarted","Data":"a950c6b7c7510594a27d4cb885b3933b226c40a3677f64a01bbf119ce5d9e0ee"} Dec 11 08:38:24 crc kubenswrapper[4881]: I1211 08:38:24.916084 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.160096743 podStartE2EDuration="1m5.916052847s" podCreationTimestamp="2025-12-11 08:37:19 +0000 UTC" firstStartedPulling="2025-12-11 08:37:20.722549948 +0000 UTC m=+1289.099918645" lastFinishedPulling="2025-12-11 08:38:23.478506062 +0000 UTC m=+1351.855874749" observedRunningTime="2025-12-11 08:38:24.894726593 +0000 UTC m=+1353.272095350" watchObservedRunningTime="2025-12-11 08:38:24.916052847 +0000 UTC m=+1353.293421564" Dec 11 08:38:24 crc kubenswrapper[4881]: I1211 08:38:24.927876 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5ccbc79968-kjfhq" Dec 11 08:38:25 crc kubenswrapper[4881]: I1211 08:38:25.236754 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-7fbbb6db6c-bqwjn" Dec 11 08:38:26 crc kubenswrapper[4881]: I1211 08:38:26.348013 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-jvkz7" Dec 11 08:38:26 crc kubenswrapper[4881]: I1211 08:38:26.512399 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qd2dc\" (UniqueName: \"kubernetes.io/projected/2dde239c-3502-4b29-8f5d-1893f53819bd-kube-api-access-qd2dc\") pod \"2dde239c-3502-4b29-8f5d-1893f53819bd\" (UID: \"2dde239c-3502-4b29-8f5d-1893f53819bd\") " Dec 11 08:38:26 crc kubenswrapper[4881]: I1211 08:38:26.512471 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dde239c-3502-4b29-8f5d-1893f53819bd-config-data\") pod \"2dde239c-3502-4b29-8f5d-1893f53819bd\" (UID: \"2dde239c-3502-4b29-8f5d-1893f53819bd\") " Dec 11 08:38:26 crc kubenswrapper[4881]: I1211 08:38:26.512614 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dde239c-3502-4b29-8f5d-1893f53819bd-combined-ca-bundle\") pod \"2dde239c-3502-4b29-8f5d-1893f53819bd\" (UID: \"2dde239c-3502-4b29-8f5d-1893f53819bd\") " Dec 11 08:38:26 crc kubenswrapper[4881]: I1211 08:38:26.524913 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dde239c-3502-4b29-8f5d-1893f53819bd-kube-api-access-qd2dc" (OuterVolumeSpecName: "kube-api-access-qd2dc") pod "2dde239c-3502-4b29-8f5d-1893f53819bd" (UID: "2dde239c-3502-4b29-8f5d-1893f53819bd"). InnerVolumeSpecName "kube-api-access-qd2dc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:26 crc kubenswrapper[4881]: I1211 08:38:26.551216 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dde239c-3502-4b29-8f5d-1893f53819bd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2dde239c-3502-4b29-8f5d-1893f53819bd" (UID: "2dde239c-3502-4b29-8f5d-1893f53819bd"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:26 crc kubenswrapper[4881]: I1211 08:38:26.603390 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dde239c-3502-4b29-8f5d-1893f53819bd-config-data" (OuterVolumeSpecName: "config-data") pod "2dde239c-3502-4b29-8f5d-1893f53819bd" (UID: "2dde239c-3502-4b29-8f5d-1893f53819bd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:26 crc kubenswrapper[4881]: I1211 08:38:26.616319 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qd2dc\" (UniqueName: \"kubernetes.io/projected/2dde239c-3502-4b29-8f5d-1893f53819bd-kube-api-access-qd2dc\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:26 crc kubenswrapper[4881]: I1211 08:38:26.616533 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dde239c-3502-4b29-8f5d-1893f53819bd-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:26 crc kubenswrapper[4881]: I1211 08:38:26.616552 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dde239c-3502-4b29-8f5d-1893f53819bd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:26 crc kubenswrapper[4881]: I1211 08:38:26.894159 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-jvkz7" event={"ID":"2dde239c-3502-4b29-8f5d-1893f53819bd","Type":"ContainerDied","Data":"ae456e6ac12e1365bac8ea21e4bbfb88a0c67b634a3e7710e3758cf72bb0f884"} Dec 11 08:38:26 crc kubenswrapper[4881]: I1211 08:38:26.894199 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ae456e6ac12e1365bac8ea21e4bbfb88a0c67b634a3e7710e3758cf72bb0f884" Dec 11 08:38:26 crc kubenswrapper[4881]: I1211 08:38:26.894256 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-jvkz7" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.528528 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6986b4b8b9-dlx84" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.603854 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5ccbc79968-kjfhq"] Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.604180 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5ccbc79968-kjfhq" podUID="9cac4a82-5425-4f3a-bf86-26d9099432e3" containerName="neutron-httpd" containerID="cri-o://b4787b60ea2d1af27f7a25e12d4c65e4fa7e8df398d8335843a14437ef100214" gracePeriod=30 Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.608161 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5ccbc79968-kjfhq" podUID="9cac4a82-5425-4f3a-bf86-26d9099432e3" containerName="neutron-api" containerID="cri-o://989f77dfba60f8b047748455ff61be09fdb5fecc1425754e7501917b9b888581" gracePeriod=30 Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.713610 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Dec 11 08:38:27 crc kubenswrapper[4881]: E1211 08:38:27.714054 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d56a4394-101c-4fa1-ba3c-b9eb0907a5ec" containerName="init" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.714073 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="d56a4394-101c-4fa1-ba3c-b9eb0907a5ec" containerName="init" Dec 11 08:38:27 crc kubenswrapper[4881]: E1211 08:38:27.714113 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d56a4394-101c-4fa1-ba3c-b9eb0907a5ec" containerName="dnsmasq-dns" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.714120 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="d56a4394-101c-4fa1-ba3c-b9eb0907a5ec" containerName="dnsmasq-dns" Dec 11 08:38:27 crc kubenswrapper[4881]: E1211 08:38:27.714128 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dde239c-3502-4b29-8f5d-1893f53819bd" containerName="heat-db-sync" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.714134 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dde239c-3502-4b29-8f5d-1893f53819bd" containerName="heat-db-sync" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.716031 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="d56a4394-101c-4fa1-ba3c-b9eb0907a5ec" containerName="dnsmasq-dns" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.716063 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dde239c-3502-4b29-8f5d-1893f53819bd" containerName="heat-db-sync" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.717552 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.719153 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.722247 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.722576 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-wzmfz" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.732703 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.844449 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9e508c6-cbef-4749-adb9-eee328d49eec-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b9e508c6-cbef-4749-adb9-eee328d49eec\") " pod="openstack/openstackclient" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.844563 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b9e508c6-cbef-4749-adb9-eee328d49eec-openstack-config\") pod \"openstackclient\" (UID: \"b9e508c6-cbef-4749-adb9-eee328d49eec\") " pod="openstack/openstackclient" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.844625 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b9e508c6-cbef-4749-adb9-eee328d49eec-openstack-config-secret\") pod \"openstackclient\" (UID: \"b9e508c6-cbef-4749-adb9-eee328d49eec\") " pod="openstack/openstackclient" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.844680 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbnww\" (UniqueName: \"kubernetes.io/projected/b9e508c6-cbef-4749-adb9-eee328d49eec-kube-api-access-zbnww\") pod \"openstackclient\" (UID: \"b9e508c6-cbef-4749-adb9-eee328d49eec\") " pod="openstack/openstackclient" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.909162 4881 generic.go:334] "Generic (PLEG): container finished" podID="9cac4a82-5425-4f3a-bf86-26d9099432e3" containerID="b4787b60ea2d1af27f7a25e12d4c65e4fa7e8df398d8335843a14437ef100214" exitCode=0 Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.909207 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ccbc79968-kjfhq" event={"ID":"9cac4a82-5425-4f3a-bf86-26d9099432e3","Type":"ContainerDied","Data":"b4787b60ea2d1af27f7a25e12d4c65e4fa7e8df398d8335843a14437ef100214"} Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.912781 4881 generic.go:334] "Generic (PLEG): container finished" podID="cc1ec075-9e84-4cfd-9f5a-b29d5af0d610" containerID="e93ae53a5a85e5ec74f3458cf16b09906e27fc5d031ed76c51db24d593d938a3" exitCode=0 Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.912824 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-bcjrt" event={"ID":"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610","Type":"ContainerDied","Data":"e93ae53a5a85e5ec74f3458cf16b09906e27fc5d031ed76c51db24d593d938a3"} Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.946999 4881 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-zbnww\" (UniqueName: \"kubernetes.io/projected/b9e508c6-cbef-4749-adb9-eee328d49eec-kube-api-access-zbnww\") pod \"openstackclient\" (UID: \"b9e508c6-cbef-4749-adb9-eee328d49eec\") " pod="openstack/openstackclient" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.947460 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9e508c6-cbef-4749-adb9-eee328d49eec-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b9e508c6-cbef-4749-adb9-eee328d49eec\") " pod="openstack/openstackclient" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.948248 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b9e508c6-cbef-4749-adb9-eee328d49eec-openstack-config\") pod \"openstackclient\" (UID: \"b9e508c6-cbef-4749-adb9-eee328d49eec\") " pod="openstack/openstackclient" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.948316 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b9e508c6-cbef-4749-adb9-eee328d49eec-openstack-config-secret\") pod \"openstackclient\" (UID: \"b9e508c6-cbef-4749-adb9-eee328d49eec\") " pod="openstack/openstackclient" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.949595 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b9e508c6-cbef-4749-adb9-eee328d49eec-openstack-config\") pod \"openstackclient\" (UID: \"b9e508c6-cbef-4749-adb9-eee328d49eec\") " pod="openstack/openstackclient" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.951511 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9e508c6-cbef-4749-adb9-eee328d49eec-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b9e508c6-cbef-4749-adb9-eee328d49eec\") " pod="openstack/openstackclient" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.951932 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b9e508c6-cbef-4749-adb9-eee328d49eec-openstack-config-secret\") pod \"openstackclient\" (UID: \"b9e508c6-cbef-4749-adb9-eee328d49eec\") " pod="openstack/openstackclient" Dec 11 08:38:27 crc kubenswrapper[4881]: I1211 08:38:27.962622 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbnww\" (UniqueName: \"kubernetes.io/projected/b9e508c6-cbef-4749-adb9-eee328d49eec-kube-api-access-zbnww\") pod \"openstackclient\" (UID: \"b9e508c6-cbef-4749-adb9-eee328d49eec\") " pod="openstack/openstackclient" Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.043746 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.045261 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.059876 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.105674 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.108312 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.150077 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.256645 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf-openstack-config-secret\") pod \"openstackclient\" (UID: \"5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf\") " pod="openstack/openstackclient" Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.256864 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf-combined-ca-bundle\") pod \"openstackclient\" (UID: \"5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf\") " pod="openstack/openstackclient" Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.256912 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6n5gf\" (UniqueName: \"kubernetes.io/projected/5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf-kube-api-access-6n5gf\") pod \"openstackclient\" (UID: \"5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf\") " pod="openstack/openstackclient" Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.256983 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf-openstack-config\") pod \"openstackclient\" (UID: \"5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf\") " pod="openstack/openstackclient" Dec 11 08:38:28 crc kubenswrapper[4881]: E1211 08:38:28.302783 4881 log.go:32] "RunPodSandbox from runtime service failed" err=< Dec 11 08:38:28 crc kubenswrapper[4881]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_b9e508c6-cbef-4749-adb9-eee328d49eec_0(ecdd93931f94345b6fd9d6a82d487847344d478ce5a277b6abecf65192b714f2): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"ecdd93931f94345b6fd9d6a82d487847344d478ce5a277b6abecf65192b714f2" Netns:"/var/run/netns/44be3f59-0232-4981-908e-550f065949b6" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=ecdd93931f94345b6fd9d6a82d487847344d478ce5a277b6abecf65192b714f2;K8S_POD_UID=b9e508c6-cbef-4749-adb9-eee328d49eec" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/b9e508c6-cbef-4749-adb9-eee328d49eec]: expected pod UID "b9e508c6-cbef-4749-adb9-eee328d49eec" but got "5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf" from Kube API Dec 11 08:38:28 crc kubenswrapper[4881]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Dec 11 08:38:28 crc kubenswrapper[4881]: > Dec 11 08:38:28 crc kubenswrapper[4881]: E1211 08:38:28.303129 4881 kuberuntime_sandbox.go:72] 
"Failed to create sandbox for pod" err=< Dec 11 08:38:28 crc kubenswrapper[4881]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_b9e508c6-cbef-4749-adb9-eee328d49eec_0(ecdd93931f94345b6fd9d6a82d487847344d478ce5a277b6abecf65192b714f2): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"ecdd93931f94345b6fd9d6a82d487847344d478ce5a277b6abecf65192b714f2" Netns:"/var/run/netns/44be3f59-0232-4981-908e-550f065949b6" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=ecdd93931f94345b6fd9d6a82d487847344d478ce5a277b6abecf65192b714f2;K8S_POD_UID=b9e508c6-cbef-4749-adb9-eee328d49eec" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/b9e508c6-cbef-4749-adb9-eee328d49eec]: expected pod UID "b9e508c6-cbef-4749-adb9-eee328d49eec" but got "5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf" from Kube API Dec 11 08:38:28 crc kubenswrapper[4881]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Dec 11 08:38:28 crc kubenswrapper[4881]: > pod="openstack/openstackclient" Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.359175 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf-openstack-config\") pod \"openstackclient\" (UID: \"5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf\") " pod="openstack/openstackclient" Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.359316 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf-openstack-config-secret\") pod \"openstackclient\" (UID: \"5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf\") " pod="openstack/openstackclient" Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.359531 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf-combined-ca-bundle\") pod \"openstackclient\" (UID: \"5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf\") " pod="openstack/openstackclient" Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.359582 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6n5gf\" (UniqueName: \"kubernetes.io/projected/5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf-kube-api-access-6n5gf\") pod \"openstackclient\" (UID: \"5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf\") " pod="openstack/openstackclient" Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.360123 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf-openstack-config\") pod \"openstackclient\" (UID: \"5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf\") " pod="openstack/openstackclient" Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.365973 4881 
Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.365973 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf-combined-ca-bundle\") pod \"openstackclient\" (UID: \"5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf\") " pod="openstack/openstackclient"
Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.365977 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf-openstack-config-secret\") pod \"openstackclient\" (UID: \"5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf\") " pod="openstack/openstackclient"
Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.376779 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6n5gf\" (UniqueName: \"kubernetes.io/projected/5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf-kube-api-access-6n5gf\") pod \"openstackclient\" (UID: \"5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf\") " pod="openstack/openstackclient"
Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.381841 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-67b8f6bb8b-gk4v6"
Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.549155 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.927966 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.931323 4881 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="b9e508c6-cbef-4749-adb9-eee328d49eec" podUID="5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf"
Dec 11 08:38:28 crc kubenswrapper[4881]: I1211 08:38:28.950590 4881 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/openstackclient" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.074791 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b9e508c6-cbef-4749-adb9-eee328d49eec-openstack-config-secret\") pod \"b9e508c6-cbef-4749-adb9-eee328d49eec\" (UID: \"b9e508c6-cbef-4749-adb9-eee328d49eec\") " Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.074863 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9e508c6-cbef-4749-adb9-eee328d49eec-combined-ca-bundle\") pod \"b9e508c6-cbef-4749-adb9-eee328d49eec\" (UID: \"b9e508c6-cbef-4749-adb9-eee328d49eec\") " Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.075234 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zbnww\" (UniqueName: \"kubernetes.io/projected/b9e508c6-cbef-4749-adb9-eee328d49eec-kube-api-access-zbnww\") pod \"b9e508c6-cbef-4749-adb9-eee328d49eec\" (UID: \"b9e508c6-cbef-4749-adb9-eee328d49eec\") " Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.075286 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b9e508c6-cbef-4749-adb9-eee328d49eec-openstack-config\") pod \"b9e508c6-cbef-4749-adb9-eee328d49eec\" (UID: \"b9e508c6-cbef-4749-adb9-eee328d49eec\") " Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.075935 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9e508c6-cbef-4749-adb9-eee328d49eec-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "b9e508c6-cbef-4749-adb9-eee328d49eec" (UID: "b9e508c6-cbef-4749-adb9-eee328d49eec"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.087892 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.090410 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9e508c6-cbef-4749-adb9-eee328d49eec-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "b9e508c6-cbef-4749-adb9-eee328d49eec" (UID: "b9e508c6-cbef-4749-adb9-eee328d49eec"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.090470 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9e508c6-cbef-4749-adb9-eee328d49eec-kube-api-access-zbnww" (OuterVolumeSpecName: "kube-api-access-zbnww") pod "b9e508c6-cbef-4749-adb9-eee328d49eec" (UID: "b9e508c6-cbef-4749-adb9-eee328d49eec"). InnerVolumeSpecName "kube-api-access-zbnww". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.090506 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9e508c6-cbef-4749-adb9-eee328d49eec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b9e508c6-cbef-4749-adb9-eee328d49eec" (UID: "b9e508c6-cbef-4749-adb9-eee328d49eec"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.177498 4881 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b9e508c6-cbef-4749-adb9-eee328d49eec-openstack-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.177533 4881 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b9e508c6-cbef-4749-adb9-eee328d49eec-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.177546 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9e508c6-cbef-4749-adb9-eee328d49eec-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.177555 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zbnww\" (UniqueName: \"kubernetes.io/projected/b9e508c6-cbef-4749-adb9-eee328d49eec-kube-api-access-zbnww\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.414658 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-bcjrt" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.496015 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-scripts\") pod \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.496390 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-combined-ca-bundle\") pod \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.496544 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7s5v\" (UniqueName: \"kubernetes.io/projected/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-kube-api-access-c7s5v\") pod \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.496581 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-etc-machine-id\") pod \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.496685 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-db-sync-config-data\") pod \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.496752 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-config-data\") pod \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\" (UID: \"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610\") " Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.496884 4881 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "cc1ec075-9e84-4cfd-9f5a-b29d5af0d610" (UID: "cc1ec075-9e84-4cfd-9f5a-b29d5af0d610"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.497875 4881 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.502304 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-kube-api-access-c7s5v" (OuterVolumeSpecName: "kube-api-access-c7s5v") pod "cc1ec075-9e84-4cfd-9f5a-b29d5af0d610" (UID: "cc1ec075-9e84-4cfd-9f5a-b29d5af0d610"). InnerVolumeSpecName "kube-api-access-c7s5v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.502607 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-scripts" (OuterVolumeSpecName: "scripts") pod "cc1ec075-9e84-4cfd-9f5a-b29d5af0d610" (UID: "cc1ec075-9e84-4cfd-9f5a-b29d5af0d610"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.503079 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "cc1ec075-9e84-4cfd-9f5a-b29d5af0d610" (UID: "cc1ec075-9e84-4cfd-9f5a-b29d5af0d610"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.537425 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc1ec075-9e84-4cfd-9f5a-b29d5af0d610" (UID: "cc1ec075-9e84-4cfd-9f5a-b29d5af0d610"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.577668 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-config-data" (OuterVolumeSpecName: "config-data") pod "cc1ec075-9e84-4cfd-9f5a-b29d5af0d610" (UID: "cc1ec075-9e84-4cfd-9f5a-b29d5af0d610"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.608395 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7s5v\" (UniqueName: \"kubernetes.io/projected/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-kube-api-access-c7s5v\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.608435 4881 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.608447 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.608459 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.608475 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.942385 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf","Type":"ContainerStarted","Data":"b34df0c19a78c32baf319c27e2571ff6d301542081881755ff0e30ff48055b34"} Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.944737 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.944758 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-bcjrt"
Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.944732 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-bcjrt" event={"ID":"cc1ec075-9e84-4cfd-9f5a-b29d5af0d610","Type":"ContainerDied","Data":"5a7e5ca3250301990b5064d6d2a24967ea49f54786fa1b754c2659ead514f72f"}
Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.944881 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a7e5ca3250301990b5064d6d2a24967ea49f54786fa1b754c2659ead514f72f"
Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.960979 4881 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="b9e508c6-cbef-4749-adb9-eee328d49eec" podUID="5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf"
Dec 11 08:38:29 crc kubenswrapper[4881]: I1211 08:38:29.993268 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-67b8f6bb8b-gk4v6"
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.064864 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-78cc5875b6-mr4tb"]
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.065209 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-78cc5875b6-mr4tb" podUID="47f7a1b2-62e4-4963-b09c-bed44acc3c4a" containerName="barbican-api-log" containerID="cri-o://6bc1a39d2a37c39394f31cd46fb1f773b6cedde4eec1c84f1f3c870e6caeb7d1" gracePeriod=30
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.068922 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-78cc5875b6-mr4tb" podUID="47f7a1b2-62e4-4963-b09c-bed44acc3c4a" containerName="barbican-api" containerID="cri-o://4619f9566660bb1526549a3b2acad0cfae04db286cf9f43cf27cce2b2aa931f7" gracePeriod=30
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.302698 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Dec 11 08:38:30 crc kubenswrapper[4881]: E1211 08:38:30.303492 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc1ec075-9e84-4cfd-9f5a-b29d5af0d610" containerName="cinder-db-sync"
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.303512 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc1ec075-9e84-4cfd-9f5a-b29d5af0d610" containerName="cinder-db-sync"
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.303747 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc1ec075-9e84-4cfd-9f5a-b29d5af0d610" containerName="cinder-db-sync"
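
Note: with the cinder-db-sync pod gone, admitting cinder-scheduler-0 triggers the stale-state sweep logged above by cpu_manager.go, state_mem.go, and memory_manager.go: per-container resource assignments recorded under the departed pod's UID are dropped before new pods are placed. A minimal Go sketch under a deliberately simplified state layout; removeStaleState and the map types are illustrative assumptions, not the kubelet's real state structures.

    package main

    import "fmt"

    // removeStaleState drops CPUSet assignments recorded for containers
    // whose pod UID is no longer active, as logged for cinder-db-sync.
    func removeStaleState(assignments map[string]map[string]string, activePods map[string]bool) {
        for podUID, containers := range assignments {
            if activePods[podUID] {
                continue
            }
            for containerName, cpus := range containers {
                fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q cpus=%s\n",
                    podUID, containerName, cpus)
            }
            delete(assignments, podUID) // deleting during range is safe in Go
        }
    }

    func main() {
        assignments := map[string]map[string]string{
            "cc1ec075-9e84-4cfd-9f5a-b29d5af0d610": {"cinder-db-sync": "0-3"},
        }
        removeStaleState(assignments, map[string]bool{}) // the db-sync pod is gone
    }
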
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.305094 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.311585 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-q8l6s"
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.311792 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.311891 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data"
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.312053 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.348408 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.369905 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-2hhvl"]
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.372516 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-2hhvl"
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.415726 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-2hhvl"]
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.435819 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vrct\" (UniqueName: \"kubernetes.io/projected/a982ebde-b33a-4397-bd71-34da9b1efc2e-kube-api-access-4vrct\") pod \"cinder-scheduler-0\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0"
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.435894 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j78wt\" (UniqueName: \"kubernetes.io/projected/efb1e391-d2a3-45e1-b5c8-9075a353bda5-kube-api-access-j78wt\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl"
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.435944 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl"
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.435986 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-config-data\") pod \"cinder-scheduler-0\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0"
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.436033 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-scripts\") pod \"cinder-scheduler-0\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0"
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.436093 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.436154 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-config\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.436219 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.436272 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.436317 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a982ebde-b33a-4397-bd71-34da9b1efc2e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.437561 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-dns-svc\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.437610 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.457416 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.460102 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.467456 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.475313 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.539634 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.539712 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.539758 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.539786 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-scripts\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.539833 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a982ebde-b33a-4397-bd71-34da9b1efc2e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.539857 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-config-data\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.539908 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-dns-svc\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.539940 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.540031 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vrct\" 
(UniqueName: \"kubernetes.io/projected/a982ebde-b33a-4397-bd71-34da9b1efc2e-kube-api-access-4vrct\") pod \"cinder-scheduler-0\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.540071 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j78wt\" (UniqueName: \"kubernetes.io/projected/efb1e391-d2a3-45e1-b5c8-9075a353bda5-kube-api-access-j78wt\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.540097 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7m6l\" (UniqueName: \"kubernetes.io/projected/93406a4d-f987-4191-acf2-4a5d1fa63457-kube-api-access-j7m6l\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.540132 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/93406a4d-f987-4191-acf2-4a5d1fa63457-etc-machine-id\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.540159 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.540199 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-config-data\") pod \"cinder-scheduler-0\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.540240 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-scripts\") pod \"cinder-scheduler-0\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.540266 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93406a4d-f987-4191-acf2-4a5d1fa63457-logs\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.540303 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-config-data-custom\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.540322 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: 
\"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.540349 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-config\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.541233 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-config\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.542130 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.547816 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.548557 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.548621 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a982ebde-b33a-4397-bd71-34da9b1efc2e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.551136 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.552778 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-scripts\") pod \"cinder-scheduler-0\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.554200 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-dns-svc\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.555057 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" 
(UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.560313 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vrct\" (UniqueName: \"kubernetes.io/projected/a982ebde-b33a-4397-bd71-34da9b1efc2e-kube-api-access-4vrct\") pod \"cinder-scheduler-0\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.561812 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-config-data\") pod \"cinder-scheduler-0\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.561956 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j78wt\" (UniqueName: \"kubernetes.io/projected/efb1e391-d2a3-45e1-b5c8-9075a353bda5-kube-api-access-j78wt\") pod \"dnsmasq-dns-5784cf869f-2hhvl\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.642665 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7m6l\" (UniqueName: \"kubernetes.io/projected/93406a4d-f987-4191-acf2-4a5d1fa63457-kube-api-access-j7m6l\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.642738 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/93406a4d-f987-4191-acf2-4a5d1fa63457-etc-machine-id\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.642801 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93406a4d-f987-4191-acf2-4a5d1fa63457-logs\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.642837 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-config-data-custom\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.642912 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.642956 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-scripts\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.642981 4881 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-config-data\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.642986 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/93406a4d-f987-4191-acf2-4a5d1fa63457-etc-machine-id\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.643769 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93406a4d-f987-4191-acf2-4a5d1fa63457-logs\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.647281 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-scripts\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.648059 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-config-data\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.648515 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-config-data-custom\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.657896 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.660268 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.662388 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7m6l\" (UniqueName: \"kubernetes.io/projected/93406a4d-f987-4191-acf2-4a5d1fa63457-kube-api-access-j7m6l\") pod \"cinder-api-0\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " pod="openstack/cinder-api-0" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.709922 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.801363 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0"
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.987647 4881 generic.go:334] "Generic (PLEG): container finished" podID="47f7a1b2-62e4-4963-b09c-bed44acc3c4a" containerID="6bc1a39d2a37c39394f31cd46fb1f773b6cedde4eec1c84f1f3c870e6caeb7d1" exitCode=143
Dec 11 08:38:30 crc kubenswrapper[4881]: I1211 08:38:30.987929 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-78cc5875b6-mr4tb" event={"ID":"47f7a1b2-62e4-4963-b09c-bed44acc3c4a","Type":"ContainerDied","Data":"6bc1a39d2a37c39394f31cd46fb1f773b6cedde4eec1c84f1f3c870e6caeb7d1"}
Dec 11 08:38:31 crc kubenswrapper[4881]: I1211 08:38:31.027457 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9e508c6-cbef-4749-adb9-eee328d49eec" path="/var/lib/kubelet/pods/b9e508c6-cbef-4749-adb9-eee328d49eec/volumes"
Dec 11 08:38:31 crc kubenswrapper[4881]: I1211 08:38:31.193232 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Dec 11 08:38:31 crc kubenswrapper[4881]: I1211 08:38:31.468661 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-2hhvl"]
Dec 11 08:38:31 crc kubenswrapper[4881]: W1211 08:38:31.623253 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod93406a4d_f987_4191_acf2_4a5d1fa63457.slice/crio-4d036f1fda508ba696b0af12f1ae20c568c9e22d857550f433803d708c1a926f WatchSource:0}: Error finding container 4d036f1fda508ba696b0af12f1ae20c568c9e22d857550f433803d708c1a926f: Status 404 returned error can't find the container with id 4d036f1fda508ba696b0af12f1ae20c568c9e22d857550f433803d708c1a926f
Dec 11 08:38:31 crc kubenswrapper[4881]: I1211 08:38:31.626931 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Dec 11 08:38:32 crc kubenswrapper[4881]: I1211 08:38:32.011397 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"93406a4d-f987-4191-acf2-4a5d1fa63457","Type":"ContainerStarted","Data":"4d036f1fda508ba696b0af12f1ae20c568c9e22d857550f433803d708c1a926f"}
Dec 11 08:38:32 crc kubenswrapper[4881]: I1211 08:38:32.015812 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" event={"ID":"efb1e391-d2a3-45e1-b5c8-9075a353bda5","Type":"ContainerStarted","Data":"85ee7ec7a41d6480a753746cfb3de6b63db9ecd0e50ec8638cd291f07e41ee5a"}
Dec 11 08:38:32 crc kubenswrapper[4881]: I1211 08:38:32.030694 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a982ebde-b33a-4397-bd71-34da9b1efc2e","Type":"ContainerStarted","Data":"949cb76d57d3383325a18e901b0307d86ea8f5c4d3187cec8e33756ef6b5c231"}
Dec 11 08:38:32 crc kubenswrapper[4881]: I1211 08:38:32.916890 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Dec 11 08:38:33 crc kubenswrapper[4881]: I1211 08:38:33.288573 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-78cc5875b6-mr4tb" podUID="47f7a1b2-62e4-4963-b09c-bed44acc3c4a" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.196:9311/healthcheck\": read tcp 10.217.0.2:55586->10.217.0.196:9311: read: connection reset by peer"
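
Note: the exitCode=143 logged above for barbican-api-log decodes by the common 128+N convention as 128+15, i.e. SIGTERM: the container stopped inside its gracePeriod=30 window rather than being SIGKILLed (which would read 137). The readiness-probe failures with "connection reset by peer" are consistent with probing an HTTP server that is already shutting down. A minimal Go sketch of the decoding; signalFromExitCode is a hypothetical helper, not kubelet code.

    package main

    import "fmt"

    // signalFromExitCode decodes the 128+N convention used for processes
    // terminated by a signal: 143 = 128+15 (SIGTERM, graceful stop),
    // 137 = 128+9 (SIGKILL after the grace period expires).
    func signalFromExitCode(code int) (signal int, killedBySignal bool) {
        if code > 128 && code <= 128+64 {
            return code - 128, true
        }
        return 0, false
    }

    func main() {
        for _, code := range []int{143, 137, 0} {
            if sig, ok := signalFromExitCode(code); ok {
                fmt.Printf("exitCode=%d: terminated by signal %d\n", code, sig)
            } else {
                fmt.Printf("exitCode=%d: normal exit\n", code)
            }
        }
    }
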
Dec 11 08:38:33 crc kubenswrapper[4881]: I1211 08:38:33.288589 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-78cc5875b6-mr4tb" podUID="47f7a1b2-62e4-4963-b09c-bed44acc3c4a" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.196:9311/healthcheck\": read tcp 10.217.0.2:55570->10.217.0.196:9311: read: connection reset by peer"
Dec 11 08:38:34 crc kubenswrapper[4881]: I1211 08:38:34.090704 4881 generic.go:334] "Generic (PLEG): container finished" podID="efb1e391-d2a3-45e1-b5c8-9075a353bda5" containerID="4848573dfa50cbcfe9d09f59e39f1d3bb7ac710e7cc19adef0b3bf6d1aaf2e77" exitCode=0
Dec 11 08:38:34 crc kubenswrapper[4881]: I1211 08:38:34.091016 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" event={"ID":"efb1e391-d2a3-45e1-b5c8-9075a353bda5","Type":"ContainerDied","Data":"4848573dfa50cbcfe9d09f59e39f1d3bb7ac710e7cc19adef0b3bf6d1aaf2e77"}
Dec 11 08:38:34 crc kubenswrapper[4881]: I1211 08:38:34.101734 4881 generic.go:334] "Generic (PLEG): container finished" podID="47f7a1b2-62e4-4963-b09c-bed44acc3c4a" containerID="4619f9566660bb1526549a3b2acad0cfae04db286cf9f43cf27cce2b2aa931f7" exitCode=0
Dec 11 08:38:34 crc kubenswrapper[4881]: I1211 08:38:34.101798 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-78cc5875b6-mr4tb" event={"ID":"47f7a1b2-62e4-4963-b09c-bed44acc3c4a","Type":"ContainerDied","Data":"4619f9566660bb1526549a3b2acad0cfae04db286cf9f43cf27cce2b2aa931f7"}
Dec 11 08:38:34 crc kubenswrapper[4881]: I1211 08:38:34.956428 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-557bc7cb47-8hntq"]
Dec 11 08:38:34 crc kubenswrapper[4881]: I1211 08:38:34.957846 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-557bc7cb47-8hntq"
Dec 11 08:38:34 crc kubenswrapper[4881]: I1211 08:38:34.966020 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data"
Dec 11 08:38:34 crc kubenswrapper[4881]: I1211 08:38:34.966326 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-gpnxr"
Dec 11 08:38:34 crc kubenswrapper[4881]: I1211 08:38:34.966570 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data"
Dec 11 08:38:34 crc kubenswrapper[4881]: I1211 08:38:34.971697 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-557bc7cb47-8hntq"]
Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.086626 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-config-data-custom\") pod \"heat-engine-557bc7cb47-8hntq\" (UID: \"41f75170-bc89-4911-a5f3-5456d3512897\") " pod="openstack/heat-engine-557bc7cb47-8hntq"
Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.086919 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-config-data\") pod \"heat-engine-557bc7cb47-8hntq\" (UID: \"41f75170-bc89-4911-a5f3-5456d3512897\") " pod="openstack/heat-engine-557bc7cb47-8hntq"
Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.087014 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-combined-ca-bundle\") pod \"heat-engine-557bc7cb47-8hntq\" (UID: \"41f75170-bc89-4911-a5f3-5456d3512897\") "
pod="openstack/heat-engine-557bc7cb47-8hntq" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.087041 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45brw\" (UniqueName: \"kubernetes.io/projected/41f75170-bc89-4911-a5f3-5456d3512897-kube-api-access-45brw\") pod \"heat-engine-557bc7cb47-8hntq\" (UID: \"41f75170-bc89-4911-a5f3-5456d3512897\") " pod="openstack/heat-engine-557bc7cb47-8hntq" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.088138 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-86c494669f-2l5s8"] Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.096729 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-86c494669f-2l5s8" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.107519 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.151630 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-2hhvl"] Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.185458 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-86c494669f-2l5s8"] Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.189618 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-config-data\") pod \"heat-engine-557bc7cb47-8hntq\" (UID: \"41f75170-bc89-4911-a5f3-5456d3512897\") " pod="openstack/heat-engine-557bc7cb47-8hntq" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.189679 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bkzqs\" (UniqueName: \"kubernetes.io/projected/773654f0-c183-4962-a942-9fc33e9f44a7-kube-api-access-bkzqs\") pod \"heat-cfnapi-86c494669f-2l5s8\" (UID: \"773654f0-c183-4962-a942-9fc33e9f44a7\") " pod="openstack/heat-cfnapi-86c494669f-2l5s8" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.189720 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-combined-ca-bundle\") pod \"heat-cfnapi-86c494669f-2l5s8\" (UID: \"773654f0-c183-4962-a942-9fc33e9f44a7\") " pod="openstack/heat-cfnapi-86c494669f-2l5s8" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.189742 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-combined-ca-bundle\") pod \"heat-engine-557bc7cb47-8hntq\" (UID: \"41f75170-bc89-4911-a5f3-5456d3512897\") " pod="openstack/heat-engine-557bc7cb47-8hntq" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.189767 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-config-data\") pod \"heat-cfnapi-86c494669f-2l5s8\" (UID: \"773654f0-c183-4962-a942-9fc33e9f44a7\") " pod="openstack/heat-cfnapi-86c494669f-2l5s8" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.189791 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45brw\" (UniqueName: 
\"kubernetes.io/projected/41f75170-bc89-4911-a5f3-5456d3512897-kube-api-access-45brw\") pod \"heat-engine-557bc7cb47-8hntq\" (UID: \"41f75170-bc89-4911-a5f3-5456d3512897\") " pod="openstack/heat-engine-557bc7cb47-8hntq" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.189861 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-config-data-custom\") pod \"heat-engine-557bc7cb47-8hntq\" (UID: \"41f75170-bc89-4911-a5f3-5456d3512897\") " pod="openstack/heat-engine-557bc7cb47-8hntq" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.189952 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-config-data-custom\") pod \"heat-cfnapi-86c494669f-2l5s8\" (UID: \"773654f0-c183-4962-a942-9fc33e9f44a7\") " pod="openstack/heat-cfnapi-86c494669f-2l5s8" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.231515 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-combined-ca-bundle\") pod \"heat-engine-557bc7cb47-8hntq\" (UID: \"41f75170-bc89-4911-a5f3-5456d3512897\") " pod="openstack/heat-engine-557bc7cb47-8hntq" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.248809 4881 generic.go:334] "Generic (PLEG): container finished" podID="9cac4a82-5425-4f3a-bf86-26d9099432e3" containerID="989f77dfba60f8b047748455ff61be09fdb5fecc1425754e7501917b9b888581" exitCode=0 Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.248949 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ccbc79968-kjfhq" event={"ID":"9cac4a82-5425-4f3a-bf86-26d9099432e3","Type":"ContainerDied","Data":"989f77dfba60f8b047748455ff61be09fdb5fecc1425754e7501917b9b888581"} Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.251948 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45brw\" (UniqueName: \"kubernetes.io/projected/41f75170-bc89-4911-a5f3-5456d3512897-kube-api-access-45brw\") pod \"heat-engine-557bc7cb47-8hntq\" (UID: \"41f75170-bc89-4911-a5f3-5456d3512897\") " pod="openstack/heat-engine-557bc7cb47-8hntq" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.253553 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-config-data-custom\") pod \"heat-engine-557bc7cb47-8hntq\" (UID: \"41f75170-bc89-4911-a5f3-5456d3512897\") " pod="openstack/heat-engine-557bc7cb47-8hntq" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.254748 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"93406a4d-f987-4191-acf2-4a5d1fa63457","Type":"ContainerStarted","Data":"32f970ae0914dac738470b0ee696139fc2cd4a575cff1e87f454798e3115a6ee"} Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.266997 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" event={"ID":"efb1e391-d2a3-45e1-b5c8-9075a353bda5","Type":"ContainerStarted","Data":"75cb17e2a2180f1327dcf0e8db156c8441f2b0dc5f09cec942f838bffef70ccb"} Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.267431 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 
11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.278674 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-4r7pn"] Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.290254 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.301385 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-config-data\") pod \"heat-engine-557bc7cb47-8hntq\" (UID: \"41f75170-bc89-4911-a5f3-5456d3512897\") " pod="openstack/heat-engine-557bc7cb47-8hntq" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.309310 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-config-data-custom\") pod \"heat-cfnapi-86c494669f-2l5s8\" (UID: \"773654f0-c183-4962-a942-9fc33e9f44a7\") " pod="openstack/heat-cfnapi-86c494669f-2l5s8" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.328378 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bkzqs\" (UniqueName: \"kubernetes.io/projected/773654f0-c183-4962-a942-9fc33e9f44a7-kube-api-access-bkzqs\") pod \"heat-cfnapi-86c494669f-2l5s8\" (UID: \"773654f0-c183-4962-a942-9fc33e9f44a7\") " pod="openstack/heat-cfnapi-86c494669f-2l5s8" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.328469 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-combined-ca-bundle\") pod \"heat-cfnapi-86c494669f-2l5s8\" (UID: \"773654f0-c183-4962-a942-9fc33e9f44a7\") " pod="openstack/heat-cfnapi-86c494669f-2l5s8" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.328515 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-config-data\") pod \"heat-cfnapi-86c494669f-2l5s8\" (UID: \"773654f0-c183-4962-a942-9fc33e9f44a7\") " pod="openstack/heat-cfnapi-86c494669f-2l5s8" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.332751 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-5497987457-dkncd"] Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.334493 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-config-data-custom\") pod \"heat-cfnapi-86c494669f-2l5s8\" (UID: \"773654f0-c183-4962-a942-9fc33e9f44a7\") " pod="openstack/heat-cfnapi-86c494669f-2l5s8" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.313028 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-557bc7cb47-8hntq" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.334887 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5497987457-dkncd" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.340037 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.343167 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-combined-ca-bundle\") pod \"heat-cfnapi-86c494669f-2l5s8\" (UID: \"773654f0-c183-4962-a942-9fc33e9f44a7\") " pod="openstack/heat-cfnapi-86c494669f-2l5s8" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.343965 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-config-data\") pod \"heat-cfnapi-86c494669f-2l5s8\" (UID: \"773654f0-c183-4962-a942-9fc33e9f44a7\") " pod="openstack/heat-cfnapi-86c494669f-2l5s8" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.359710 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bkzqs\" (UniqueName: \"kubernetes.io/projected/773654f0-c183-4962-a942-9fc33e9f44a7-kube-api-access-bkzqs\") pod \"heat-cfnapi-86c494669f-2l5s8\" (UID: \"773654f0-c183-4962-a942-9fc33e9f44a7\") " pod="openstack/heat-cfnapi-86c494669f-2l5s8" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.362251 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-4r7pn"] Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.390728 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5497987457-dkncd"] Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.398016 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" podStartSLOduration=5.397996044 podStartE2EDuration="5.397996044s" podCreationTimestamp="2025-12-11 08:38:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:38:35.289771255 +0000 UTC m=+1363.667139962" watchObservedRunningTime="2025-12-11 08:38:35.397996044 +0000 UTC m=+1363.775364741" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.435887 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dkwh\" (UniqueName: \"kubernetes.io/projected/8ae1990b-5d22-4cb7-ace7-92acddc6df35-kube-api-access-8dkwh\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.436167 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.436412 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-config\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.436577 4881 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-config-data\") pod \"heat-api-5497987457-dkncd\" (UID: \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\") " pod="openstack/heat-api-5497987457-dkncd" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.436664 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqhl8\" (UniqueName: \"kubernetes.io/projected/020cd7f3-faa3-4bec-adfd-25a9b60456f7-kube-api-access-xqhl8\") pod \"heat-api-5497987457-dkncd\" (UID: \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\") " pod="openstack/heat-api-5497987457-dkncd" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.436810 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.436909 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.437041 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-combined-ca-bundle\") pod \"heat-api-5497987457-dkncd\" (UID: \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\") " pod="openstack/heat-api-5497987457-dkncd" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.437136 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-config-data-custom\") pod \"heat-api-5497987457-dkncd\" (UID: \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\") " pod="openstack/heat-api-5497987457-dkncd" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.437237 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.457394 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-86c494669f-2l5s8" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.539990 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqhl8\" (UniqueName: \"kubernetes.io/projected/020cd7f3-faa3-4bec-adfd-25a9b60456f7-kube-api-access-xqhl8\") pod \"heat-api-5497987457-dkncd\" (UID: \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\") " pod="openstack/heat-api-5497987457-dkncd" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.540090 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.540124 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.540176 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-combined-ca-bundle\") pod \"heat-api-5497987457-dkncd\" (UID: \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\") " pod="openstack/heat-api-5497987457-dkncd" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.540203 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-config-data-custom\") pod \"heat-api-5497987457-dkncd\" (UID: \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\") " pod="openstack/heat-api-5497987457-dkncd" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.540244 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.540286 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dkwh\" (UniqueName: \"kubernetes.io/projected/8ae1990b-5d22-4cb7-ace7-92acddc6df35-kube-api-access-8dkwh\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.540319 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.540438 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-config\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " 
pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.540460 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-config-data\") pod \"heat-api-5497987457-dkncd\" (UID: \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\") " pod="openstack/heat-api-5497987457-dkncd" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.544752 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.545235 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-config-data\") pod \"heat-api-5497987457-dkncd\" (UID: \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\") " pod="openstack/heat-api-5497987457-dkncd" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.545249 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.545409 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.545529 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.545924 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-config\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.554185 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-combined-ca-bundle\") pod \"heat-api-5497987457-dkncd\" (UID: \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\") " pod="openstack/heat-api-5497987457-dkncd" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.554542 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-config-data-custom\") pod \"heat-api-5497987457-dkncd\" (UID: \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\") " pod="openstack/heat-api-5497987457-dkncd" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.571766 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-xqhl8\" (UniqueName: \"kubernetes.io/projected/020cd7f3-faa3-4bec-adfd-25a9b60456f7-kube-api-access-xqhl8\") pod \"heat-api-5497987457-dkncd\" (UID: \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\") " pod="openstack/heat-api-5497987457-dkncd" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.577036 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dkwh\" (UniqueName: \"kubernetes.io/projected/8ae1990b-5d22-4cb7-ace7-92acddc6df35-kube-api-access-8dkwh\") pod \"dnsmasq-dns-f6bc4c6c9-4r7pn\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.667531 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.721895 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5ccbc79968-kjfhq" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.744761 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-config-data\") pod \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.744932 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-logs\") pod \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.745022 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-config-data-custom\") pod \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.745052 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-combined-ca-bundle\") pod \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.745155 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpqx6\" (UniqueName: \"kubernetes.io/projected/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-kube-api-access-mpqx6\") pod \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\" (UID: \"47f7a1b2-62e4-4963-b09c-bed44acc3c4a\") " Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.750929 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-kube-api-access-mpqx6" (OuterVolumeSpecName: "kube-api-access-mpqx6") pod "47f7a1b2-62e4-4963-b09c-bed44acc3c4a" (UID: "47f7a1b2-62e4-4963-b09c-bed44acc3c4a"). InnerVolumeSpecName "kube-api-access-mpqx6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.751272 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-logs" (OuterVolumeSpecName: "logs") pod "47f7a1b2-62e4-4963-b09c-bed44acc3c4a" (UID: "47f7a1b2-62e4-4963-b09c-bed44acc3c4a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.757542 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "47f7a1b2-62e4-4963-b09c-bed44acc3c4a" (UID: "47f7a1b2-62e4-4963-b09c-bed44acc3c4a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.775227 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.791306 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5497987457-dkncd" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.861554 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "47f7a1b2-62e4-4963-b09c-bed44acc3c4a" (UID: "47f7a1b2-62e4-4963-b09c-bed44acc3c4a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.862297 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-config\") pod \"9cac4a82-5425-4f3a-bf86-26d9099432e3\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.862379 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-combined-ca-bundle\") pod \"9cac4a82-5425-4f3a-bf86-26d9099432e3\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.862468 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-ovndb-tls-certs\") pod \"9cac4a82-5425-4f3a-bf86-26d9099432e3\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.862489 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nht9t\" (UniqueName: \"kubernetes.io/projected/9cac4a82-5425-4f3a-bf86-26d9099432e3-kube-api-access-nht9t\") pod \"9cac4a82-5425-4f3a-bf86-26d9099432e3\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.862592 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-httpd-config\") pod \"9cac4a82-5425-4f3a-bf86-26d9099432e3\" (UID: \"9cac4a82-5425-4f3a-bf86-26d9099432e3\") " Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.865325 
4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-config-data" (OuterVolumeSpecName: "config-data") pod "47f7a1b2-62e4-4963-b09c-bed44acc3c4a" (UID: "47f7a1b2-62e4-4963-b09c-bed44acc3c4a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.867867 4881 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-logs\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.867893 4881 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.867905 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.867914 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mpqx6\" (UniqueName: \"kubernetes.io/projected/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-kube-api-access-mpqx6\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.867923 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47f7a1b2-62e4-4963-b09c-bed44acc3c4a-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.872538 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cac4a82-5425-4f3a-bf86-26d9099432e3-kube-api-access-nht9t" (OuterVolumeSpecName: "kube-api-access-nht9t") pod "9cac4a82-5425-4f3a-bf86-26d9099432e3" (UID: "9cac4a82-5425-4f3a-bf86-26d9099432e3"). InnerVolumeSpecName "kube-api-access-nht9t". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.906687 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "9cac4a82-5425-4f3a-bf86-26d9099432e3" (UID: "9cac4a82-5425-4f3a-bf86-26d9099432e3"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.976011 4881 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-httpd-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:35 crc kubenswrapper[4881]: I1211 08:38:35.976337 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nht9t\" (UniqueName: \"kubernetes.io/projected/9cac4a82-5425-4f3a-bf86-26d9099432e3-kube-api-access-nht9t\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.022778 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-config" (OuterVolumeSpecName: "config") pod "9cac4a82-5425-4f3a-bf86-26d9099432e3" (UID: "9cac4a82-5425-4f3a-bf86-26d9099432e3"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.044470 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9cac4a82-5425-4f3a-bf86-26d9099432e3" (UID: "9cac4a82-5425-4f3a-bf86-26d9099432e3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.079346 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.081393 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.151414 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "9cac4a82-5425-4f3a-bf86-26d9099432e3" (UID: "9cac4a82-5425-4f3a-bf86-26d9099432e3"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.187890 4881 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9cac4a82-5425-4f3a-bf86-26d9099432e3-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.289955 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-557bc7cb47-8hntq"] Dec 11 08:38:36 crc kubenswrapper[4881]: W1211 08:38:36.307041 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod41f75170_bc89_4911_a5f3_5456d3512897.slice/crio-99ef9e8842d16f19d0a64f913afee0b2db9e83ec9c99f5876cc22f4bbf7b25a5 WatchSource:0}: Error finding container 99ef9e8842d16f19d0a64f913afee0b2db9e83ec9c99f5876cc22f4bbf7b25a5: Status 404 returned error can't find the container with id 99ef9e8842d16f19d0a64f913afee0b2db9e83ec9c99f5876cc22f4bbf7b25a5 Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.315801 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-78cc5875b6-mr4tb" event={"ID":"47f7a1b2-62e4-4963-b09c-bed44acc3c4a","Type":"ContainerDied","Data":"bd02cc3a17b6d3398858fbb78249a674d2e353dbbe9ff0da2cfbb1a56a43636a"} Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.315853 4881 scope.go:117] "RemoveContainer" containerID="4619f9566660bb1526549a3b2acad0cfae04db286cf9f43cf27cce2b2aa931f7" Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.315993 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-78cc5875b6-mr4tb" Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.331394 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" podUID="efb1e391-d2a3-45e1-b5c8-9075a353bda5" containerName="dnsmasq-dns" containerID="cri-o://75cb17e2a2180f1327dcf0e8db156c8441f2b0dc5f09cec942f838bffef70ccb" gracePeriod=10 Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.331703 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5ccbc79968-kjfhq" Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.335853 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ccbc79968-kjfhq" event={"ID":"9cac4a82-5425-4f3a-bf86-26d9099432e3","Type":"ContainerDied","Data":"8fc0b155ad9f9c867c71b4cf517c1afdb007ba51f3af9c990591a4b1a157b5f7"} Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.392451 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5ccbc79968-kjfhq"] Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.408812 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5ccbc79968-kjfhq"] Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.416436 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-86c494669f-2l5s8"] Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.428128 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-78cc5875b6-mr4tb"] Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.436218 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-78cc5875b6-mr4tb"] Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.449216 4881 scope.go:117] "RemoveContainer" containerID="6bc1a39d2a37c39394f31cd46fb1f773b6cedde4eec1c84f1f3c870e6caeb7d1" Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.552438 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-4r7pn"] Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.561162 4881 scope.go:117] "RemoveContainer" containerID="b4787b60ea2d1af27f7a25e12d4c65e4fa7e8df398d8335843a14437ef100214" Dec 11 08:38:36 crc kubenswrapper[4881]: W1211 08:38:36.586022 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8ae1990b_5d22_4cb7_ace7_92acddc6df35.slice/crio-f3ede814408af67f714fa7560345425599d168f707853b292da87b048fb4dd10 WatchSource:0}: Error finding container f3ede814408af67f714fa7560345425599d168f707853b292da87b048fb4dd10: Status 404 returned error can't find the container with id f3ede814408af67f714fa7560345425599d168f707853b292da87b048fb4dd10 Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.639317 4881 scope.go:117] "RemoveContainer" containerID="989f77dfba60f8b047748455ff61be09fdb5fecc1425754e7501917b9b888581" Dec 11 08:38:36 crc kubenswrapper[4881]: I1211 08:38:36.718012 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5497987457-dkncd"] Dec 11 08:38:36 crc kubenswrapper[4881]: W1211 08:38:36.741032 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod020cd7f3_faa3_4bec_adfd_25a9b60456f7.slice/crio-53319dd70f9021440e509374c4dc13c0ce9ba56b8ec0ac5a0c70301ff8500648 WatchSource:0}: Error finding container 
53319dd70f9021440e509374c4dc13c0ce9ba56b8ec0ac5a0c70301ff8500648: Status 404 returned error can't find the container with id 53319dd70f9021440e509374c4dc13c0ce9ba56b8ec0ac5a0c70301ff8500648 Dec 11 08:38:37 crc kubenswrapper[4881]: I1211 08:38:37.020122 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47f7a1b2-62e4-4963-b09c-bed44acc3c4a" path="/var/lib/kubelet/pods/47f7a1b2-62e4-4963-b09c-bed44acc3c4a/volumes" Dec 11 08:38:37 crc kubenswrapper[4881]: I1211 08:38:37.020955 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9cac4a82-5425-4f3a-bf86-26d9099432e3" path="/var/lib/kubelet/pods/9cac4a82-5425-4f3a-bf86-26d9099432e3/volumes" Dec 11 08:38:37 crc kubenswrapper[4881]: I1211 08:38:37.342674 4881 generic.go:334] "Generic (PLEG): container finished" podID="efb1e391-d2a3-45e1-b5c8-9075a353bda5" containerID="75cb17e2a2180f1327dcf0e8db156c8441f2b0dc5f09cec942f838bffef70ccb" exitCode=0 Dec 11 08:38:37 crc kubenswrapper[4881]: I1211 08:38:37.342744 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" event={"ID":"efb1e391-d2a3-45e1-b5c8-9075a353bda5","Type":"ContainerDied","Data":"75cb17e2a2180f1327dcf0e8db156c8441f2b0dc5f09cec942f838bffef70ccb"} Dec 11 08:38:37 crc kubenswrapper[4881]: I1211 08:38:37.345122 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-557bc7cb47-8hntq" event={"ID":"41f75170-bc89-4911-a5f3-5456d3512897","Type":"ContainerStarted","Data":"99ef9e8842d16f19d0a64f913afee0b2db9e83ec9c99f5876cc22f4bbf7b25a5"} Dec 11 08:38:37 crc kubenswrapper[4881]: I1211 08:38:37.346199 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" event={"ID":"8ae1990b-5d22-4cb7-ace7-92acddc6df35","Type":"ContainerStarted","Data":"f3ede814408af67f714fa7560345425599d168f707853b292da87b048fb4dd10"} Dec 11 08:38:37 crc kubenswrapper[4881]: I1211 08:38:37.351409 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-86c494669f-2l5s8" event={"ID":"773654f0-c183-4962-a942-9fc33e9f44a7","Type":"ContainerStarted","Data":"40f52ed3f2d0d999b613255b598e85ac711267670f850cfd8794876c5620bb28"} Dec 11 08:38:37 crc kubenswrapper[4881]: I1211 08:38:37.353037 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5497987457-dkncd" event={"ID":"020cd7f3-faa3-4bec-adfd-25a9b60456f7","Type":"ContainerStarted","Data":"53319dd70f9021440e509374c4dc13c0ce9ba56b8ec0ac5a0c70301ff8500648"} Dec 11 08:38:38 crc kubenswrapper[4881]: I1211 08:38:38.368065 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" event={"ID":"8ae1990b-5d22-4cb7-ace7-92acddc6df35","Type":"ContainerStarted","Data":"4840d348945bd8796ccac6a5b0d54d1e4143b7cea5f2a1f61918a38dfecec705"} Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.080789 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-5f6c547b6c-rjk9h"] Dec 11 08:38:39 crc kubenswrapper[4881]: E1211 08:38:39.081817 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47f7a1b2-62e4-4963-b09c-bed44acc3c4a" containerName="barbican-api-log" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.081830 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="47f7a1b2-62e4-4963-b09c-bed44acc3c4a" containerName="barbican-api-log" Dec 11 08:38:39 crc kubenswrapper[4881]: E1211 08:38:39.081853 4881 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="9cac4a82-5425-4f3a-bf86-26d9099432e3" containerName="neutron-httpd" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.081859 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cac4a82-5425-4f3a-bf86-26d9099432e3" containerName="neutron-httpd" Dec 11 08:38:39 crc kubenswrapper[4881]: E1211 08:38:39.081924 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cac4a82-5425-4f3a-bf86-26d9099432e3" containerName="neutron-api" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.081931 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cac4a82-5425-4f3a-bf86-26d9099432e3" containerName="neutron-api" Dec 11 08:38:39 crc kubenswrapper[4881]: E1211 08:38:39.081943 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47f7a1b2-62e4-4963-b09c-bed44acc3c4a" containerName="barbican-api" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.081949 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="47f7a1b2-62e4-4963-b09c-bed44acc3c4a" containerName="barbican-api" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.082220 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="47f7a1b2-62e4-4963-b09c-bed44acc3c4a" containerName="barbican-api-log" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.082273 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="47f7a1b2-62e4-4963-b09c-bed44acc3c4a" containerName="barbican-api" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.082284 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="9cac4a82-5425-4f3a-bf86-26d9099432e3" containerName="neutron-api" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.082300 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="9cac4a82-5425-4f3a-bf86-26d9099432e3" containerName="neutron-httpd" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.083833 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5f6c547b6c-rjk9h"] Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.083935 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.087208 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.087550 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.089171 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.265707 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/910014af-7b9e-49b8-99e3-b80a15d72faf-run-httpd\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.265813 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/910014af-7b9e-49b8-99e3-b80a15d72faf-public-tls-certs\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.265845 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/910014af-7b9e-49b8-99e3-b80a15d72faf-config-data\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.265913 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfz6r\" (UniqueName: \"kubernetes.io/projected/910014af-7b9e-49b8-99e3-b80a15d72faf-kube-api-access-kfz6r\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.265993 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/910014af-7b9e-49b8-99e3-b80a15d72faf-log-httpd\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.266018 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/910014af-7b9e-49b8-99e3-b80a15d72faf-internal-tls-certs\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.266038 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/910014af-7b9e-49b8-99e3-b80a15d72faf-combined-ca-bundle\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.266057 4881 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/910014af-7b9e-49b8-99e3-b80a15d72faf-etc-swift\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.371327 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/910014af-7b9e-49b8-99e3-b80a15d72faf-log-httpd\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.371726 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/910014af-7b9e-49b8-99e3-b80a15d72faf-log-httpd\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.371747 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/910014af-7b9e-49b8-99e3-b80a15d72faf-internal-tls-certs\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.371837 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/910014af-7b9e-49b8-99e3-b80a15d72faf-combined-ca-bundle\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.371878 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/910014af-7b9e-49b8-99e3-b80a15d72faf-etc-swift\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.371985 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/910014af-7b9e-49b8-99e3-b80a15d72faf-run-httpd\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.372185 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/910014af-7b9e-49b8-99e3-b80a15d72faf-public-tls-certs\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.372246 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/910014af-7b9e-49b8-99e3-b80a15d72faf-config-data\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.372407 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfz6r\" (UniqueName: 
\"kubernetes.io/projected/910014af-7b9e-49b8-99e3-b80a15d72faf-kube-api-access-kfz6r\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.372889 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/910014af-7b9e-49b8-99e3-b80a15d72faf-run-httpd\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.380491 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/910014af-7b9e-49b8-99e3-b80a15d72faf-etc-swift\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.381324 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/910014af-7b9e-49b8-99e3-b80a15d72faf-combined-ca-bundle\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.381409 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/910014af-7b9e-49b8-99e3-b80a15d72faf-config-data\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.381673 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/910014af-7b9e-49b8-99e3-b80a15d72faf-internal-tls-certs\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.387695 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/910014af-7b9e-49b8-99e3-b80a15d72faf-public-tls-certs\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.397053 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfz6r\" (UniqueName: \"kubernetes.io/projected/910014af-7b9e-49b8-99e3-b80a15d72faf-kube-api-access-kfz6r\") pod \"swift-proxy-5f6c547b6c-rjk9h\" (UID: \"910014af-7b9e-49b8-99e3-b80a15d72faf\") " pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.418050 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"93406a4d-f987-4191-acf2-4a5d1fa63457","Type":"ContainerStarted","Data":"9d19c0500ec624aac640e79d51ba54d0fb3f157e17dc5f8d84035c30655a9031"} Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.418216 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="93406a4d-f987-4191-acf2-4a5d1fa63457" containerName="cinder-api-log" containerID="cri-o://32f970ae0914dac738470b0ee696139fc2cd4a575cff1e87f454798e3115a6ee" gracePeriod=30 Dec 11 08:38:39 crc 
kubenswrapper[4881]: I1211 08:38:39.418449 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="93406a4d-f987-4191-acf2-4a5d1fa63457" containerName="cinder-api" containerID="cri-o://9d19c0500ec624aac640e79d51ba54d0fb3f157e17dc5f8d84035c30655a9031" gracePeriod=30 Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.418624 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.422199 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.434906 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-557bc7cb47-8hntq" event={"ID":"41f75170-bc89-4911-a5f3-5456d3512897","Type":"ContainerStarted","Data":"e984e439f24989cdf8f7b0f35994a1bb5660e2301c8cd791b8a8c90727ebe50f"} Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.436005 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-557bc7cb47-8hntq" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.443057 4881 generic.go:334] "Generic (PLEG): container finished" podID="8ae1990b-5d22-4cb7-ace7-92acddc6df35" containerID="4840d348945bd8796ccac6a5b0d54d1e4143b7cea5f2a1f61918a38dfecec705" exitCode=0 Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.443121 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" event={"ID":"8ae1990b-5d22-4cb7-ace7-92acddc6df35","Type":"ContainerDied","Data":"4840d348945bd8796ccac6a5b0d54d1e4143b7cea5f2a1f61918a38dfecec705"} Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.457931 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=9.45790755 podStartE2EDuration="9.45790755s" podCreationTimestamp="2025-12-11 08:38:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:38:39.446229976 +0000 UTC m=+1367.823598693" watchObservedRunningTime="2025-12-11 08:38:39.45790755 +0000 UTC m=+1367.835276237" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.462187 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a982ebde-b33a-4397-bd71-34da9b1efc2e","Type":"ContainerStarted","Data":"945304d81117ec5b4b769570dff0eab198d9721114145fd8e56e910043d30a80"} Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.525524 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-557bc7cb47-8hntq" podStartSLOduration=5.525505174 podStartE2EDuration="5.525505174s" podCreationTimestamp="2025-12-11 08:38:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:38:39.469137023 +0000 UTC m=+1367.846505720" watchObservedRunningTime="2025-12-11 08:38:39.525505174 +0000 UTC m=+1367.902873871" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.591251 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.682236 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-ovsdbserver-sb\") pod \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.682358 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j78wt\" (UniqueName: \"kubernetes.io/projected/efb1e391-d2a3-45e1-b5c8-9075a353bda5-kube-api-access-j78wt\") pod \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.682406 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-config\") pod \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.682535 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-dns-swift-storage-0\") pod \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.682574 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-ovsdbserver-nb\") pod \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.682663 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-dns-svc\") pod \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\" (UID: \"efb1e391-d2a3-45e1-b5c8-9075a353bda5\") " Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.703505 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efb1e391-d2a3-45e1-b5c8-9075a353bda5-kube-api-access-j78wt" (OuterVolumeSpecName: "kube-api-access-j78wt") pod "efb1e391-d2a3-45e1-b5c8-9075a353bda5" (UID: "efb1e391-d2a3-45e1-b5c8-9075a353bda5"). InnerVolumeSpecName "kube-api-access-j78wt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.785225 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j78wt\" (UniqueName: \"kubernetes.io/projected/efb1e391-d2a3-45e1-b5c8-9075a353bda5-kube-api-access-j78wt\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.892018 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-config" (OuterVolumeSpecName: "config") pod "efb1e391-d2a3-45e1-b5c8-9075a353bda5" (UID: "efb1e391-d2a3-45e1-b5c8-9075a353bda5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.920434 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "efb1e391-d2a3-45e1-b5c8-9075a353bda5" (UID: "efb1e391-d2a3-45e1-b5c8-9075a353bda5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.934466 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "efb1e391-d2a3-45e1-b5c8-9075a353bda5" (UID: "efb1e391-d2a3-45e1-b5c8-9075a353bda5"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.942548 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "efb1e391-d2a3-45e1-b5c8-9075a353bda5" (UID: "efb1e391-d2a3-45e1-b5c8-9075a353bda5"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.987480 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "efb1e391-d2a3-45e1-b5c8-9075a353bda5" (UID: "efb1e391-d2a3-45e1-b5c8-9075a353bda5"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.988984 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.989002 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.989013 4881 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.989027 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:39 crc kubenswrapper[4881]: I1211 08:38:39.989037 4881 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/efb1e391-d2a3-45e1-b5c8-9075a353bda5-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:40 crc kubenswrapper[4881]: I1211 08:38:40.210129 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5f6c547b6c-rjk9h"] Dec 11 08:38:40 crc kubenswrapper[4881]: W1211 08:38:40.225917 4881 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod910014af_7b9e_49b8_99e3_b80a15d72faf.slice/crio-f9c2cda567e9d34d927021029c1a67cac8edb7dae8cec44ce8a57548c53b8ce1 WatchSource:0}: Error finding container f9c2cda567e9d34d927021029c1a67cac8edb7dae8cec44ce8a57548c53b8ce1: Status 404 returned error can't find the container with id f9c2cda567e9d34d927021029c1a67cac8edb7dae8cec44ce8a57548c53b8ce1 Dec 11 08:38:40 crc kubenswrapper[4881]: I1211 08:38:40.483484 4881 generic.go:334] "Generic (PLEG): container finished" podID="93406a4d-f987-4191-acf2-4a5d1fa63457" containerID="9d19c0500ec624aac640e79d51ba54d0fb3f157e17dc5f8d84035c30655a9031" exitCode=0 Dec 11 08:38:40 crc kubenswrapper[4881]: I1211 08:38:40.483905 4881 generic.go:334] "Generic (PLEG): container finished" podID="93406a4d-f987-4191-acf2-4a5d1fa63457" containerID="32f970ae0914dac738470b0ee696139fc2cd4a575cff1e87f454798e3115a6ee" exitCode=143 Dec 11 08:38:40 crc kubenswrapper[4881]: I1211 08:38:40.483569 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"93406a4d-f987-4191-acf2-4a5d1fa63457","Type":"ContainerDied","Data":"9d19c0500ec624aac640e79d51ba54d0fb3f157e17dc5f8d84035c30655a9031"} Dec 11 08:38:40 crc kubenswrapper[4881]: I1211 08:38:40.484077 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"93406a4d-f987-4191-acf2-4a5d1fa63457","Type":"ContainerDied","Data":"32f970ae0914dac738470b0ee696139fc2cd4a575cff1e87f454798e3115a6ee"} Dec 11 08:38:40 crc kubenswrapper[4881]: I1211 08:38:40.489652 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5f6c547b6c-rjk9h" event={"ID":"910014af-7b9e-49b8-99e3-b80a15d72faf","Type":"ContainerStarted","Data":"f9c2cda567e9d34d927021029c1a67cac8edb7dae8cec44ce8a57548c53b8ce1"} Dec 11 08:38:40 crc kubenswrapper[4881]: I1211 08:38:40.494290 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" Dec 11 08:38:40 crc kubenswrapper[4881]: I1211 08:38:40.498512 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-2hhvl" event={"ID":"efb1e391-d2a3-45e1-b5c8-9075a353bda5","Type":"ContainerDied","Data":"85ee7ec7a41d6480a753746cfb3de6b63db9ecd0e50ec8638cd291f07e41ee5a"} Dec 11 08:38:40 crc kubenswrapper[4881]: I1211 08:38:40.506559 4881 scope.go:117] "RemoveContainer" containerID="75cb17e2a2180f1327dcf0e8db156c8441f2b0dc5f09cec942f838bffef70ccb" Dec 11 08:38:40 crc kubenswrapper[4881]: I1211 08:38:40.544392 4881 scope.go:117] "RemoveContainer" containerID="4848573dfa50cbcfe9d09f59e39f1d3bb7ac710e7cc19adef0b3bf6d1aaf2e77" Dec 11 08:38:40 crc kubenswrapper[4881]: I1211 08:38:40.545246 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-2hhvl"] Dec 11 08:38:40 crc kubenswrapper[4881]: I1211 08:38:40.557784 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-2hhvl"] Dec 11 08:38:41 crc kubenswrapper[4881]: I1211 08:38:41.021005 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efb1e391-d2a3-45e1-b5c8-9075a353bda5" path="/var/lib/kubelet/pods/efb1e391-d2a3-45e1-b5c8-9075a353bda5/volumes" Dec 11 08:38:41 crc kubenswrapper[4881]: I1211 08:38:41.507829 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5f6c547b6c-rjk9h" event={"ID":"910014af-7b9e-49b8-99e3-b80a15d72faf","Type":"ContainerStarted","Data":"4bf89854425e5122c3eef5b58c251d3234474e437b978c75d18aef8a17d962b9"} Dec 11 08:38:41 crc kubenswrapper[4881]: I1211 08:38:41.514271 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" event={"ID":"8ae1990b-5d22-4cb7-ace7-92acddc6df35","Type":"ContainerStarted","Data":"3e1e885c89156a7d385bcec17fd37c76098b23f9a0ffd2288225cae75a0514cd"} Dec 11 08:38:41 crc kubenswrapper[4881]: I1211 08:38:41.516878 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:41 crc kubenswrapper[4881]: I1211 08:38:41.521388 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a982ebde-b33a-4397-bd71-34da9b1efc2e","Type":"ContainerStarted","Data":"509bf974b82628f86f012c6a7b20eadef9d160c2075eb853f9a1d8dfb4857bc9"} Dec 11 08:38:41 crc kubenswrapper[4881]: I1211 08:38:41.535143 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" podStartSLOduration=6.53512623 podStartE2EDuration="6.53512623s" podCreationTimestamp="2025-12-11 08:38:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:38:41.530844601 +0000 UTC m=+1369.908213308" watchObservedRunningTime="2025-12-11 08:38:41.53512623 +0000 UTC m=+1369.912494927" Dec 11 08:38:41 crc kubenswrapper[4881]: I1211 08:38:41.557015 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=7.085461231 podStartE2EDuration="11.55699663s" podCreationTimestamp="2025-12-11 08:38:30 +0000 UTC" firstStartedPulling="2025-12-11 08:38:31.212649242 +0000 UTC m=+1359.590017939" lastFinishedPulling="2025-12-11 08:38:35.684184641 +0000 UTC m=+1364.061553338" observedRunningTime="2025-12-11 08:38:41.551669966 +0000 UTC m=+1369.929038673" 
watchObservedRunningTime="2025-12-11 08:38:41.55699663 +0000 UTC m=+1369.934365327" Dec 11 08:38:41 crc kubenswrapper[4881]: I1211 08:38:41.677061 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:38:41 crc kubenswrapper[4881]: I1211 08:38:41.677415 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerName="ceilometer-notification-agent" containerID="cri-o://cfbaa890eb39174cd60e497974aeb6e480aa79256fdd9afb6abd5dfc43862e10" gracePeriod=30 Dec 11 08:38:41 crc kubenswrapper[4881]: I1211 08:38:41.677889 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerName="ceilometer-central-agent" containerID="cri-o://a950c6b7c7510594a27d4cb885b3933b226c40a3677f64a01bbf119ce5d9e0ee" gracePeriod=30 Dec 11 08:38:41 crc kubenswrapper[4881]: I1211 08:38:41.677972 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerName="proxy-httpd" containerID="cri-o://5717d0de5676b96297cf2bd65ce4a4be8677cc71f4e9a1b7ec795e2d09a43453" gracePeriod=30 Dec 11 08:38:41 crc kubenswrapper[4881]: I1211 08:38:41.678031 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerName="sg-core" containerID="cri-o://fc0e0f13955e8f26f942126958994aa607d18e71140c9af36d62770bcb6b8588" gracePeriod=30 Dec 11 08:38:41 crc kubenswrapper[4881]: I1211 08:38:41.687377 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 11 08:38:42 crc kubenswrapper[4881]: I1211 08:38:42.536879 4881 generic.go:334] "Generic (PLEG): container finished" podID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerID="fc0e0f13955e8f26f942126958994aa607d18e71140c9af36d62770bcb6b8588" exitCode=2 Dec 11 08:38:42 crc kubenswrapper[4881]: I1211 08:38:42.537573 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0074c6f2-5d03-406d-a8a3-19f87e5980d8","Type":"ContainerDied","Data":"fc0e0f13955e8f26f942126958994aa607d18e71140c9af36d62770bcb6b8588"} Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.082739 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.083312 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fb741205-ce03-4c1d-9181-b2efc3c92319" containerName="glance-log" containerID="cri-o://c4006b0e738d45d323e12ce6001a2110fff8d26a20bbcb970da9a399dd8e3e62" gracePeriod=30 Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.083442 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fb741205-ce03-4c1d-9181-b2efc3c92319" containerName="glance-httpd" containerID="cri-o://e63123dac3e0c7900898c6e9b6eb12295de5e928c5abc16df766d433bbaf73cb" gracePeriod=30 Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.376493 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-d56bd9469-fm4xb"] Dec 11 08:38:43 crc kubenswrapper[4881]: E1211 08:38:43.377825 4881 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="efb1e391-d2a3-45e1-b5c8-9075a353bda5" containerName="init" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.377843 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="efb1e391-d2a3-45e1-b5c8-9075a353bda5" containerName="init" Dec 11 08:38:43 crc kubenswrapper[4881]: E1211 08:38:43.377873 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efb1e391-d2a3-45e1-b5c8-9075a353bda5" containerName="dnsmasq-dns" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.377882 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="efb1e391-d2a3-45e1-b5c8-9075a353bda5" containerName="dnsmasq-dns" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.378432 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="efb1e391-d2a3-45e1-b5c8-9075a353bda5" containerName="dnsmasq-dns" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.379554 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-d56bd9469-fm4xb" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.428705 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9vp7\" (UniqueName: \"kubernetes.io/projected/8f8f4fc0-e759-433b-835a-f2c0db79850f-kube-api-access-r9vp7\") pod \"heat-engine-d56bd9469-fm4xb\" (UID: \"8f8f4fc0-e759-433b-835a-f2c0db79850f\") " pod="openstack/heat-engine-d56bd9469-fm4xb" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.428820 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-config-data\") pod \"heat-engine-d56bd9469-fm4xb\" (UID: \"8f8f4fc0-e759-433b-835a-f2c0db79850f\") " pod="openstack/heat-engine-d56bd9469-fm4xb" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.428892 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-combined-ca-bundle\") pod \"heat-engine-d56bd9469-fm4xb\" (UID: \"8f8f4fc0-e759-433b-835a-f2c0db79850f\") " pod="openstack/heat-engine-d56bd9469-fm4xb" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.428979 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-config-data-custom\") pod \"heat-engine-d56bd9469-fm4xb\" (UID: \"8f8f4fc0-e759-433b-835a-f2c0db79850f\") " pod="openstack/heat-engine-d56bd9469-fm4xb" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.465365 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-d56bd9469-fm4xb"] Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.538656 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-bd877888f-jfg9m"] Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.540396 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.563195 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-7b446c9cf4-8fv29"] Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.563156 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9vp7\" (UniqueName: \"kubernetes.io/projected/8f8f4fc0-e759-433b-835a-f2c0db79850f-kube-api-access-r9vp7\") pod \"heat-engine-d56bd9469-fm4xb\" (UID: \"8f8f4fc0-e759-433b-835a-f2c0db79850f\") " pod="openstack/heat-engine-d56bd9469-fm4xb" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.563399 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-config-data\") pod \"heat-engine-d56bd9469-fm4xb\" (UID: \"8f8f4fc0-e759-433b-835a-f2c0db79850f\") " pod="openstack/heat-engine-d56bd9469-fm4xb" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.564211 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-combined-ca-bundle\") pod \"heat-engine-d56bd9469-fm4xb\" (UID: \"8f8f4fc0-e759-433b-835a-f2c0db79850f\") " pod="openstack/heat-engine-d56bd9469-fm4xb" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.564290 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-config-data-custom\") pod \"heat-engine-d56bd9469-fm4xb\" (UID: \"8f8f4fc0-e759-433b-835a-f2c0db79850f\") " pod="openstack/heat-engine-d56bd9469-fm4xb" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.564749 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.573860 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-combined-ca-bundle\") pod \"heat-engine-d56bd9469-fm4xb\" (UID: \"8f8f4fc0-e759-433b-835a-f2c0db79850f\") " pod="openstack/heat-engine-d56bd9469-fm4xb" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.575760 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-config-data-custom\") pod \"heat-engine-d56bd9469-fm4xb\" (UID: \"8f8f4fc0-e759-433b-835a-f2c0db79850f\") " pod="openstack/heat-engine-d56bd9469-fm4xb" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.576712 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-config-data\") pod \"heat-engine-d56bd9469-fm4xb\" (UID: \"8f8f4fc0-e759-433b-835a-f2c0db79850f\") " pod="openstack/heat-engine-d56bd9469-fm4xb" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.591047 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9vp7\" (UniqueName: \"kubernetes.io/projected/8f8f4fc0-e759-433b-835a-f2c0db79850f-kube-api-access-r9vp7\") pod \"heat-engine-d56bd9469-fm4xb\" (UID: \"8f8f4fc0-e759-433b-835a-f2c0db79850f\") " pod="openstack/heat-engine-d56bd9469-fm4xb" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.602435 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-bd877888f-jfg9m"] Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.623421 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7b446c9cf4-8fv29"] Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.668436 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8d89p\" (UniqueName: \"kubernetes.io/projected/d5902bc2-d738-4bab-9b99-78b827e3a003-kube-api-access-8d89p\") pod \"heat-cfnapi-bd877888f-jfg9m\" (UID: \"d5902bc2-d738-4bab-9b99-78b827e3a003\") " pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.668566 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-config-data\") pod \"heat-cfnapi-bd877888f-jfg9m\" (UID: \"d5902bc2-d738-4bab-9b99-78b827e3a003\") " pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.668584 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-config-data\") pod \"heat-api-7b446c9cf4-8fv29\" (UID: \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\") " pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.668635 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-config-data-custom\") pod \"heat-cfnapi-bd877888f-jfg9m\" (UID: \"d5902bc2-d738-4bab-9b99-78b827e3a003\") " pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 
08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.668679 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lw5kk\" (UniqueName: \"kubernetes.io/projected/7eca07f6-40f2-48b2-90fe-b5b9b332707f-kube-api-access-lw5kk\") pod \"heat-api-7b446c9cf4-8fv29\" (UID: \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\") " pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.668700 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-combined-ca-bundle\") pod \"heat-cfnapi-bd877888f-jfg9m\" (UID: \"d5902bc2-d738-4bab-9b99-78b827e3a003\") " pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.668716 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-config-data-custom\") pod \"heat-api-7b446c9cf4-8fv29\" (UID: \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\") " pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.668741 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-combined-ca-bundle\") pod \"heat-api-7b446c9cf4-8fv29\" (UID: \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\") " pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.712520 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-d56bd9469-fm4xb" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.772879 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8d89p\" (UniqueName: \"kubernetes.io/projected/d5902bc2-d738-4bab-9b99-78b827e3a003-kube-api-access-8d89p\") pod \"heat-cfnapi-bd877888f-jfg9m\" (UID: \"d5902bc2-d738-4bab-9b99-78b827e3a003\") " pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.773221 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-config-data\") pod \"heat-cfnapi-bd877888f-jfg9m\" (UID: \"d5902bc2-d738-4bab-9b99-78b827e3a003\") " pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.773239 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-config-data\") pod \"heat-api-7b446c9cf4-8fv29\" (UID: \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\") " pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.773302 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-config-data-custom\") pod \"heat-cfnapi-bd877888f-jfg9m\" (UID: \"d5902bc2-d738-4bab-9b99-78b827e3a003\") " pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.773362 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lw5kk\" (UniqueName: 
\"kubernetes.io/projected/7eca07f6-40f2-48b2-90fe-b5b9b332707f-kube-api-access-lw5kk\") pod \"heat-api-7b446c9cf4-8fv29\" (UID: \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\") " pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.773382 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-combined-ca-bundle\") pod \"heat-cfnapi-bd877888f-jfg9m\" (UID: \"d5902bc2-d738-4bab-9b99-78b827e3a003\") " pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.773418 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-config-data-custom\") pod \"heat-api-7b446c9cf4-8fv29\" (UID: \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\") " pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.773448 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-combined-ca-bundle\") pod \"heat-api-7b446c9cf4-8fv29\" (UID: \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\") " pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.782459 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-config-data\") pod \"heat-cfnapi-bd877888f-jfg9m\" (UID: \"d5902bc2-d738-4bab-9b99-78b827e3a003\") " pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.785696 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-combined-ca-bundle\") pod \"heat-api-7b446c9cf4-8fv29\" (UID: \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\") " pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.786104 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-config-data-custom\") pod \"heat-cfnapi-bd877888f-jfg9m\" (UID: \"d5902bc2-d738-4bab-9b99-78b827e3a003\") " pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.790001 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-combined-ca-bundle\") pod \"heat-cfnapi-bd877888f-jfg9m\" (UID: \"d5902bc2-d738-4bab-9b99-78b827e3a003\") " pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.790173 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-config-data-custom\") pod \"heat-api-7b446c9cf4-8fv29\" (UID: \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\") " pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.794406 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8d89p\" (UniqueName: \"kubernetes.io/projected/d5902bc2-d738-4bab-9b99-78b827e3a003-kube-api-access-8d89p\") pod 
\"heat-cfnapi-bd877888f-jfg9m\" (UID: \"d5902bc2-d738-4bab-9b99-78b827e3a003\") " pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.799675 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lw5kk\" (UniqueName: \"kubernetes.io/projected/7eca07f6-40f2-48b2-90fe-b5b9b332707f-kube-api-access-lw5kk\") pod \"heat-api-7b446c9cf4-8fv29\" (UID: \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\") " pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.801137 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-config-data\") pod \"heat-api-7b446c9cf4-8fv29\" (UID: \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\") " pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.867171 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:38:43 crc kubenswrapper[4881]: I1211 08:38:43.991485 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.581361 4881 generic.go:334] "Generic (PLEG): container finished" podID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerID="5717d0de5676b96297cf2bd65ce4a4be8677cc71f4e9a1b7ec795e2d09a43453" exitCode=0 Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.581380 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0074c6f2-5d03-406d-a8a3-19f87e5980d8","Type":"ContainerDied","Data":"5717d0de5676b96297cf2bd65ce4a4be8677cc71f4e9a1b7ec795e2d09a43453"} Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.719660 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.797284 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-scripts\") pod \"93406a4d-f987-4191-acf2-4a5d1fa63457\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.797426 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-config-data-custom\") pod \"93406a4d-f987-4191-acf2-4a5d1fa63457\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.797484 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7m6l\" (UniqueName: \"kubernetes.io/projected/93406a4d-f987-4191-acf2-4a5d1fa63457-kube-api-access-j7m6l\") pod \"93406a4d-f987-4191-acf2-4a5d1fa63457\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.797507 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/93406a4d-f987-4191-acf2-4a5d1fa63457-etc-machine-id\") pod \"93406a4d-f987-4191-acf2-4a5d1fa63457\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.797634 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-combined-ca-bundle\") pod \"93406a4d-f987-4191-acf2-4a5d1fa63457\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.797685 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93406a4d-f987-4191-acf2-4a5d1fa63457-logs\") pod \"93406a4d-f987-4191-acf2-4a5d1fa63457\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.797726 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-config-data\") pod \"93406a4d-f987-4191-acf2-4a5d1fa63457\" (UID: \"93406a4d-f987-4191-acf2-4a5d1fa63457\") " Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.797971 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/93406a4d-f987-4191-acf2-4a5d1fa63457-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "93406a4d-f987-4191-acf2-4a5d1fa63457" (UID: "93406a4d-f987-4191-acf2-4a5d1fa63457"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.798352 4881 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/93406a4d-f987-4191-acf2-4a5d1fa63457-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.801193 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-scripts" (OuterVolumeSpecName: "scripts") pod "93406a4d-f987-4191-acf2-4a5d1fa63457" (UID: "93406a4d-f987-4191-acf2-4a5d1fa63457"). 
InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.801453 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93406a4d-f987-4191-acf2-4a5d1fa63457-logs" (OuterVolumeSpecName: "logs") pod "93406a4d-f987-4191-acf2-4a5d1fa63457" (UID: "93406a4d-f987-4191-acf2-4a5d1fa63457"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.813395 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "93406a4d-f987-4191-acf2-4a5d1fa63457" (UID: "93406a4d-f987-4191-acf2-4a5d1fa63457"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.813658 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93406a4d-f987-4191-acf2-4a5d1fa63457-kube-api-access-j7m6l" (OuterVolumeSpecName: "kube-api-access-j7m6l") pod "93406a4d-f987-4191-acf2-4a5d1fa63457" (UID: "93406a4d-f987-4191-acf2-4a5d1fa63457"). InnerVolumeSpecName "kube-api-access-j7m6l". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.883240 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "93406a4d-f987-4191-acf2-4a5d1fa63457" (UID: "93406a4d-f987-4191-acf2-4a5d1fa63457"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.908005 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-config-data" (OuterVolumeSpecName: "config-data") pod "93406a4d-f987-4191-acf2-4a5d1fa63457" (UID: "93406a4d-f987-4191-acf2-4a5d1fa63457"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.908861 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7m6l\" (UniqueName: \"kubernetes.io/projected/93406a4d-f987-4191-acf2-4a5d1fa63457-kube-api-access-j7m6l\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.908901 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.908913 4881 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93406a4d-f987-4191-acf2-4a5d1fa63457-logs\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.908950 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.908961 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:44 crc kubenswrapper[4881]: I1211 08:38:44.908974 4881 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93406a4d-f987-4191-acf2-4a5d1fa63457-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.437178 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5497987457-dkncd"] Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.453498 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-86c494669f-2l5s8"] Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.477353 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-579694df8d-vpn5m"] Dec 11 08:38:45 crc kubenswrapper[4881]: E1211 08:38:45.477935 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93406a4d-f987-4191-acf2-4a5d1fa63457" containerName="cinder-api-log" Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.477958 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="93406a4d-f987-4191-acf2-4a5d1fa63457" containerName="cinder-api-log" Dec 11 08:38:45 crc kubenswrapper[4881]: E1211 08:38:45.477986 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93406a4d-f987-4191-acf2-4a5d1fa63457" containerName="cinder-api" Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.477995 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="93406a4d-f987-4191-acf2-4a5d1fa63457" containerName="cinder-api" Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.478348 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="93406a4d-f987-4191-acf2-4a5d1fa63457" containerName="cinder-api-log" Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.478385 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="93406a4d-f987-4191-acf2-4a5d1fa63457" containerName="cinder-api" Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.479403 4881 util.go:30] "No sandbox for pod can be found. 
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.484195 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.484445 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.504398 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-579694df8d-vpn5m"]
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.523413 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-b5858866f-srtbn"]
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.525624 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-b5858866f-srtbn"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.525727 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-internal-tls-certs\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.525796 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-config-data-custom\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.525903 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-config-data\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.526047 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xv2xp\" (UniqueName: \"kubernetes.io/projected/8bc9eb50-582d-468f-8286-f9a0d3c1def4-kube-api-access-xv2xp\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.526111 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-combined-ca-bundle\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.526428 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-public-tls-certs\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.532773 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.533013 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.556269 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-b5858866f-srtbn"]
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.629490 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xv2xp\" (UniqueName: \"kubernetes.io/projected/8bc9eb50-582d-468f-8286-f9a0d3c1def4-kube-api-access-xv2xp\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.629568 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-combined-ca-bundle\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.629613 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-config-data-custom\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.629667 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gwhn\" (UniqueName: \"kubernetes.io/projected/a466820d-9c8f-4aa5-8e39-485b6212a154-kube-api-access-6gwhn\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.630380 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-config-data\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.630449 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-public-tls-certs\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.630582 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-public-tls-certs\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.630610 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-internal-tls-certs\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.630669 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-internal-tls-certs\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.630701 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-config-data-custom\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.632105 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-config-data\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.632199 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-combined-ca-bundle\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.636678 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-combined-ca-bundle\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.637747 4881 generic.go:334] "Generic (PLEG): container finished" podID="fb741205-ce03-4c1d-9181-b2efc3c92319" containerID="c4006b0e738d45d323e12ce6001a2110fff8d26a20bbcb970da9a399dd8e3e62" exitCode=143
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.637827 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fb741205-ce03-4c1d-9181-b2efc3c92319","Type":"ContainerDied","Data":"c4006b0e738d45d323e12ce6001a2110fff8d26a20bbcb970da9a399dd8e3e62"}
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.638815 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-public-tls-certs\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.640934 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-internal-tls-certs\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.645151 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-config-data-custom\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.645598 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-config-data\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.654513 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"93406a4d-f987-4191-acf2-4a5d1fa63457","Type":"ContainerDied","Data":"4d036f1fda508ba696b0af12f1ae20c568c9e22d857550f433803d708c1a926f"}
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.654566 4881 scope.go:117] "RemoveContainer" containerID="9d19c0500ec624aac640e79d51ba54d0fb3f157e17dc5f8d84035c30655a9031"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.654712 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.658640 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.663995 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xv2xp\" (UniqueName: \"kubernetes.io/projected/8bc9eb50-582d-468f-8286-f9a0d3c1def4-kube-api-access-xv2xp\") pod \"heat-cfnapi-579694df8d-vpn5m\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " pod="openstack/heat-cfnapi-579694df8d-vpn5m"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.724326 4881 generic.go:334] "Generic (PLEG): container finished" podID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerID="a950c6b7c7510594a27d4cb885b3933b226c40a3677f64a01bbf119ce5d9e0ee" exitCode=0
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.724400 4881 generic.go:334] "Generic (PLEG): container finished" podID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerID="cfbaa890eb39174cd60e497974aeb6e480aa79256fdd9afb6abd5dfc43862e10" exitCode=0
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.724426 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0074c6f2-5d03-406d-a8a3-19f87e5980d8","Type":"ContainerDied","Data":"a950c6b7c7510594a27d4cb885b3933b226c40a3677f64a01bbf119ce5d9e0ee"}
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.724458 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0074c6f2-5d03-406d-a8a3-19f87e5980d8","Type":"ContainerDied","Data":"cfbaa890eb39174cd60e497974aeb6e480aa79256fdd9afb6abd5dfc43862e10"}
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.736758 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-internal-tls-certs\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.736961 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-combined-ca-bundle\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.737060 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-config-data-custom\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.737139 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gwhn\" (UniqueName: \"kubernetes.io/projected/a466820d-9c8f-4aa5-8e39-485b6212a154-kube-api-access-6gwhn\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.737241 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-config-data\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.737306 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-public-tls-certs\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.756198 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-combined-ca-bundle\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.756965 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-config-data-custom\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.758896 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-internal-tls-certs\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.759546 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-public-tls-certs\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn"
Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.762116 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gwhn\" (UniqueName: \"kubernetes.io/projected/a466820d-9c8f-4aa5-8e39-485b6212a154-kube-api-access-6gwhn\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn"
\"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn" Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.768616 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-config-data\") pod \"heat-api-b5858866f-srtbn\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " pod="openstack/heat-api-b5858866f-srtbn" Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.818212 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-579694df8d-vpn5m" Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.868247 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-b5858866f-srtbn" Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.899562 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.915984 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.927257 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.929813 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.945064 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.951403 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.951581 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Dec 11 08:38:45 crc kubenswrapper[4881]: I1211 08:38:45.951643 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.066779 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djlwh\" (UniqueName: \"kubernetes.io/projected/98c33c2d-b3e5-450d-8c52-544acac89c74-kube-api-access-djlwh\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.066911 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.066952 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-config-data\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.066984 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/98c33c2d-b3e5-450d-8c52-544acac89c74-logs\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " 
pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.067035 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-public-tls-certs\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.067095 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-config-data-custom\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.067139 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/98c33c2d-b3e5-450d-8c52-544acac89c74-etc-machine-id\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.067307 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-scripts\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.067381 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.118653 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.120304 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="96a878c1-c8dc-443b-a6a2-e9e2b3833213" containerName="glance-log" containerID="cri-o://4468e46bc7e99e4bd0e2b6b2c10187478a1315d51fac1b2b9e5dec3b360d5c8f" gracePeriod=30 Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.120604 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="96a878c1-c8dc-443b-a6a2-e9e2b3833213" containerName="glance-httpd" containerID="cri-o://9f2b61792cd2e899dae69b7bf99ab2a846b2cf8cd5c7bc92f5683c5e3af317f1" gracePeriod=30 Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.169654 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/98c33c2d-b3e5-450d-8c52-544acac89c74-etc-machine-id\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.169792 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-scripts\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.169821 4881 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.169858 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djlwh\" (UniqueName: \"kubernetes.io/projected/98c33c2d-b3e5-450d-8c52-544acac89c74-kube-api-access-djlwh\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.169912 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.169940 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-config-data\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.169962 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/98c33c2d-b3e5-450d-8c52-544acac89c74-logs\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.169994 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-public-tls-certs\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.170017 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-config-data-custom\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.170394 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/98c33c2d-b3e5-450d-8c52-544acac89c74-etc-machine-id\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.171045 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/98c33c2d-b3e5-450d-8c52-544acac89c74-logs\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.177387 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.179764 4881 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-public-tls-certs\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.181577 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-lhqk7"] Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.183221 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-scripts\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.184075 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-config-data\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.199725 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-lhqk7" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.202217 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-lhqk7"] Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.202672 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.203173 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/98c33c2d-b3e5-450d-8c52-544acac89c74-config-data-custom\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.217390 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djlwh\" (UniqueName: \"kubernetes.io/projected/98c33c2d-b3e5-450d-8c52-544acac89c74-kube-api-access-djlwh\") pod \"cinder-api-0\" (UID: \"98c33c2d-b3e5-450d-8c52-544acac89c74\") " pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.238715 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.274358 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.276794 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrr84\" (UniqueName: \"kubernetes.io/projected/0152813f-e688-49b2-88d2-afba5096bd0e-kube-api-access-zrr84\") pod \"nova-api-db-create-lhqk7\" (UID: \"0152813f-e688-49b2-88d2-afba5096bd0e\") " pod="openstack/nova-api-db-create-lhqk7" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.314155 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-gs9fw"] Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.315989 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-gs9fw" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.332082 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-gs9fw"] Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.378881 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wz97r\" (UniqueName: \"kubernetes.io/projected/7a116558-2db5-4fe1-9f64-888d7cd93f57-kube-api-access-wz97r\") pod \"nova-cell0-db-create-gs9fw\" (UID: \"7a116558-2db5-4fe1-9f64-888d7cd93f57\") " pod="openstack/nova-cell0-db-create-gs9fw" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.379835 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrr84\" (UniqueName: \"kubernetes.io/projected/0152813f-e688-49b2-88d2-afba5096bd0e-kube-api-access-zrr84\") pod \"nova-api-db-create-lhqk7\" (UID: \"0152813f-e688-49b2-88d2-afba5096bd0e\") " pod="openstack/nova-api-db-create-lhqk7" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.404481 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrr84\" (UniqueName: \"kubernetes.io/projected/0152813f-e688-49b2-88d2-afba5096bd0e-kube-api-access-zrr84\") pod \"nova-api-db-create-lhqk7\" (UID: \"0152813f-e688-49b2-88d2-afba5096bd0e\") " pod="openstack/nova-api-db-create-lhqk7" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.482673 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wz97r\" (UniqueName: \"kubernetes.io/projected/7a116558-2db5-4fe1-9f64-888d7cd93f57-kube-api-access-wz97r\") pod \"nova-cell0-db-create-gs9fw\" (UID: \"7a116558-2db5-4fe1-9f64-888d7cd93f57\") " pod="openstack/nova-cell0-db-create-gs9fw" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.494907 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-bjwpq"] Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.496871 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-bjwpq" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.509729 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wz97r\" (UniqueName: \"kubernetes.io/projected/7a116558-2db5-4fe1-9f64-888d7cd93f57-kube-api-access-wz97r\") pod \"nova-cell0-db-create-gs9fw\" (UID: \"7a116558-2db5-4fe1-9f64-888d7cd93f57\") " pod="openstack/nova-cell0-db-create-gs9fw" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.512788 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-bjwpq"] Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.580554 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-lhqk7" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.584736 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgkxg\" (UniqueName: \"kubernetes.io/projected/556e7646-0cc0-4686-875f-6738267e467e-kube-api-access-tgkxg\") pod \"nova-cell1-db-create-bjwpq\" (UID: \"556e7646-0cc0-4686-875f-6738267e467e\") " pod="openstack/nova-cell1-db-create-bjwpq" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.649185 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-gs9fw" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.686704 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgkxg\" (UniqueName: \"kubernetes.io/projected/556e7646-0cc0-4686-875f-6738267e467e-kube-api-access-tgkxg\") pod \"nova-cell1-db-create-bjwpq\" (UID: \"556e7646-0cc0-4686-875f-6738267e467e\") " pod="openstack/nova-cell1-db-create-bjwpq" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.709026 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgkxg\" (UniqueName: \"kubernetes.io/projected/556e7646-0cc0-4686-875f-6738267e467e-kube-api-access-tgkxg\") pod \"nova-cell1-db-create-bjwpq\" (UID: \"556e7646-0cc0-4686-875f-6738267e467e\") " pod="openstack/nova-cell1-db-create-bjwpq" Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.738808 4881 generic.go:334] "Generic (PLEG): container finished" podID="fb741205-ce03-4c1d-9181-b2efc3c92319" containerID="e63123dac3e0c7900898c6e9b6eb12295de5e928c5abc16df766d433bbaf73cb" exitCode=0 Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.738870 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fb741205-ce03-4c1d-9181-b2efc3c92319","Type":"ContainerDied","Data":"e63123dac3e0c7900898c6e9b6eb12295de5e928c5abc16df766d433bbaf73cb"} Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.741271 4881 generic.go:334] "Generic (PLEG): container finished" podID="96a878c1-c8dc-443b-a6a2-e9e2b3833213" containerID="4468e46bc7e99e4bd0e2b6b2c10187478a1315d51fac1b2b9e5dec3b360d5c8f" exitCode=143 Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.741628 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"96a878c1-c8dc-443b-a6a2-e9e2b3833213","Type":"ContainerDied","Data":"4468e46bc7e99e4bd0e2b6b2c10187478a1315d51fac1b2b9e5dec3b360d5c8f"} Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.793708 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 11 08:38:46 crc kubenswrapper[4881]: I1211 08:38:46.826010 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-bjwpq" Dec 11 08:38:47 crc kubenswrapper[4881]: I1211 08:38:47.021203 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93406a4d-f987-4191-acf2-4a5d1fa63457" path="/var/lib/kubelet/pods/93406a4d-f987-4191-acf2-4a5d1fa63457/volumes" Dec 11 08:38:47 crc kubenswrapper[4881]: I1211 08:38:47.752285 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="a982ebde-b33a-4397-bd71-34da9b1efc2e" containerName="cinder-scheduler" containerID="cri-o://945304d81117ec5b4b769570dff0eab198d9721114145fd8e56e910043d30a80" gracePeriod=30 Dec 11 08:38:47 crc kubenswrapper[4881]: I1211 08:38:47.752418 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="a982ebde-b33a-4397-bd71-34da9b1efc2e" containerName="probe" containerID="cri-o://509bf974b82628f86f012c6a7b20eadef9d160c2075eb853f9a1d8dfb4857bc9" gracePeriod=30 Dec 11 08:38:49 crc kubenswrapper[4881]: I1211 08:38:49.779288 4881 generic.go:334] "Generic (PLEG): container finished" podID="a982ebde-b33a-4397-bd71-34da9b1efc2e" containerID="509bf974b82628f86f012c6a7b20eadef9d160c2075eb853f9a1d8dfb4857bc9" exitCode=0 Dec 11 08:38:49 crc kubenswrapper[4881]: I1211 08:38:49.779393 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a982ebde-b33a-4397-bd71-34da9b1efc2e","Type":"ContainerDied","Data":"509bf974b82628f86f012c6a7b20eadef9d160c2075eb853f9a1d8dfb4857bc9"} Dec 11 08:38:49 crc kubenswrapper[4881]: I1211 08:38:49.787375 4881 generic.go:334] "Generic (PLEG): container finished" podID="96a878c1-c8dc-443b-a6a2-e9e2b3833213" containerID="9f2b61792cd2e899dae69b7bf99ab2a846b2cf8cd5c7bc92f5683c5e3af317f1" exitCode=0 Dec 11 08:38:49 crc kubenswrapper[4881]: I1211 08:38:49.787527 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"96a878c1-c8dc-443b-a6a2-e9e2b3833213","Type":"ContainerDied","Data":"9f2b61792cd2e899dae69b7bf99ab2a846b2cf8cd5c7bc92f5683c5e3af317f1"} Dec 11 08:38:49 crc kubenswrapper[4881]: I1211 08:38:49.992965 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.181:3000/\": dial tcp 10.217.0.181:3000: connect: connection refused" Dec 11 08:38:50 crc kubenswrapper[4881]: I1211 08:38:50.777515 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:38:50 crc kubenswrapper[4881]: I1211 08:38:50.851285 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-js2tw"] Dec 11 08:38:50 crc kubenswrapper[4881]: I1211 08:38:50.851547 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" podUID="edab6e89-cb03-44e6-b511-d64ba764b857" containerName="dnsmasq-dns" containerID="cri-o://f5af75019d1bfcfe57fda0fa3426711b5e30dbf674c78914eeb3c9a13b15e860" gracePeriod=10 Dec 11 08:38:51 crc kubenswrapper[4881]: I1211 08:38:51.816301 4881 generic.go:334] "Generic (PLEG): container finished" podID="edab6e89-cb03-44e6-b511-d64ba764b857" containerID="f5af75019d1bfcfe57fda0fa3426711b5e30dbf674c78914eeb3c9a13b15e860" exitCode=0 Dec 11 08:38:51 crc kubenswrapper[4881]: I1211 08:38:51.816378 4881 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" event={"ID":"edab6e89-cb03-44e6-b511-d64ba764b857","Type":"ContainerDied","Data":"f5af75019d1bfcfe57fda0fa3426711b5e30dbf674c78914eeb3c9a13b15e860"} Dec 11 08:38:52 crc kubenswrapper[4881]: E1211 08:38:52.225499 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified" Dec 11 08:38:52 crc kubenswrapper[4881]: E1211 08:38:52.226083 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:openstackclient,Image:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,Command:[/bin/sleep],Args:[infinity],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n679h649h64ch6h5ffhdbh556hb8hb7h66fhf5hch545h595h595h97h578h6dh554h576h658h5f8h7fh569h65bh648hb6h96h75h5b5h677h96q,ValueFrom:nil,},EnvVar{Name:OS_CLOUD,Value:default,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_CA_CERT,Value:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_HOST,Value:metric-storage-prometheus.openstack.svc,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_PORT,Value:9090,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:openstack-config,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/cloudrc,SubPath:cloudrc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6n5gf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42401,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42401,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstackclient_openstack(5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:38:52 crc kubenswrapper[4881]: E1211 08:38:52.227591 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstackclient" podUID="5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf" Dec 11 08:38:52 crc kubenswrapper[4881]: I1211 08:38:52.833326 
4881 generic.go:334] "Generic (PLEG): container finished" podID="a982ebde-b33a-4397-bd71-34da9b1efc2e" containerID="945304d81117ec5b4b769570dff0eab198d9721114145fd8e56e910043d30a80" exitCode=0 Dec 11 08:38:52 crc kubenswrapper[4881]: I1211 08:38:52.833384 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a982ebde-b33a-4397-bd71-34da9b1efc2e","Type":"ContainerDied","Data":"945304d81117ec5b4b769570dff0eab198d9721114145fd8e56e910043d30a80"} Dec 11 08:38:52 crc kubenswrapper[4881]: E1211 08:38:52.836543 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified\\\"\"" pod="openstack/openstackclient" podUID="5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf" Dec 11 08:38:52 crc kubenswrapper[4881]: I1211 08:38:52.963000 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" podUID="edab6e89-cb03-44e6-b511-d64ba764b857" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.195:5353: connect: connection refused" Dec 11 08:38:53 crc kubenswrapper[4881]: E1211 08:38:53.007688 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-heat-api:current-podified" Dec 11 08:38:53 crc kubenswrapper[4881]: E1211 08:38:53.007887 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-api,Image:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_httpd_setup && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n645hd9h8ch545h575h5bh5c8h5f8h54dh596h5dh79h66h7fh64h55bh7fh5b9h5d4h665h654h55ch58dhf5h5b5h584h554h65h64bh68chffh68bq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:heat-api-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-custom,ReadOnly:true,MountPath:/etc/heat/heat.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xqhl8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthcheck,Port:{0 8004 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:10,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthcheck,Port:{0 8004 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:10,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-api-5497987457-dkncd_openstack(020cd7f3-faa3-4bec-adfd-25a9b60456f7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:38:53 crc kubenswrapper[4881]: E1211 08:38:53.008986 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-api-5497987457-dkncd" podUID="020cd7f3-faa3-4bec-adfd-25a9b60456f7" Dec 11 08:38:53 crc kubenswrapper[4881]: I1211 08:38:53.439873 4881 scope.go:117] "RemoveContainer" containerID="32f970ae0914dac738470b0ee696139fc2cd4a575cff1e87f454798e3115a6ee" Dec 11 08:38:53 crc kubenswrapper[4881]: I1211 08:38:53.704113 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:38:53 crc kubenswrapper[4881]: I1211 08:38:53.790535 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-scripts\") pod \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " Dec 11 08:38:53 crc kubenswrapper[4881]: I1211 08:38:53.790773 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-combined-ca-bundle\") pod \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " Dec 11 08:38:53 crc kubenswrapper[4881]: I1211 08:38:53.790898 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0074c6f2-5d03-406d-a8a3-19f87e5980d8-run-httpd\") pod \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " Dec 11 08:38:53 crc kubenswrapper[4881]: I1211 08:38:53.790942 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nsqms\" (UniqueName: \"kubernetes.io/projected/0074c6f2-5d03-406d-a8a3-19f87e5980d8-kube-api-access-nsqms\") pod \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " Dec 11 08:38:53 crc kubenswrapper[4881]: I1211 08:38:53.790969 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-config-data\") pod \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " Dec 11 08:38:53 crc kubenswrapper[4881]: I1211 08:38:53.790995 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0074c6f2-5d03-406d-a8a3-19f87e5980d8-log-httpd\") pod \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " Dec 11 08:38:53 crc kubenswrapper[4881]: I1211 08:38:53.791178 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-sg-core-conf-yaml\") pod \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\" (UID: \"0074c6f2-5d03-406d-a8a3-19f87e5980d8\") " Dec 11 08:38:53 crc kubenswrapper[4881]: I1211 08:38:53.928365 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:38:53 crc kubenswrapper[4881]: I1211 08:38:53.928505 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0074c6f2-5d03-406d-a8a3-19f87e5980d8","Type":"ContainerDied","Data":"d8fdb909bec2b926989722e08fd72d701faa5ebc6cca3a579809322ea84ecce9"} Dec 11 08:38:53 crc kubenswrapper[4881]: I1211 08:38:53.928555 4881 scope.go:117] "RemoveContainer" containerID="a950c6b7c7510594a27d4cb885b3933b226c40a3677f64a01bbf119ce5d9e0ee" Dec 11 08:38:53 crc kubenswrapper[4881]: I1211 08:38:53.989229 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0074c6f2-5d03-406d-a8a3-19f87e5980d8-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0074c6f2-5d03-406d-a8a3-19f87e5980d8" (UID: "0074c6f2-5d03-406d-a8a3-19f87e5980d8"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:38:53 crc kubenswrapper[4881]: I1211 08:38:53.994113 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0074c6f2-5d03-406d-a8a3-19f87e5980d8-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0074c6f2-5d03-406d-a8a3-19f87e5980d8" (UID: "0074c6f2-5d03-406d-a8a3-19f87e5980d8"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.008719 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0074c6f2-5d03-406d-a8a3-19f87e5980d8" (UID: "0074c6f2-5d03-406d-a8a3-19f87e5980d8"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.009790 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0074c6f2-5d03-406d-a8a3-19f87e5980d8-kube-api-access-nsqms" (OuterVolumeSpecName: "kube-api-access-nsqms") pod "0074c6f2-5d03-406d-a8a3-19f87e5980d8" (UID: "0074c6f2-5d03-406d-a8a3-19f87e5980d8"). InnerVolumeSpecName "kube-api-access-nsqms". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.011479 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0074c6f2-5d03-406d-a8a3-19f87e5980d8" (UID: "0074c6f2-5d03-406d-a8a3-19f87e5980d8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.016731 4881 scope.go:117] "RemoveContainer" containerID="5717d0de5676b96297cf2bd65ce4a4be8677cc71f4e9a1b7ec795e2d09a43453" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.017684 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-scripts" (OuterVolumeSpecName: "scripts") pod "0074c6f2-5d03-406d-a8a3-19f87e5980d8" (UID: "0074c6f2-5d03-406d-a8a3-19f87e5980d8"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.018218 4881 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.018258 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.018272 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.018284 4881 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0074c6f2-5d03-406d-a8a3-19f87e5980d8-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.018296 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nsqms\" (UniqueName: \"kubernetes.io/projected/0074c6f2-5d03-406d-a8a3-19f87e5980d8-kube-api-access-nsqms\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.018309 4881 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0074c6f2-5d03-406d-a8a3-19f87e5980d8-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.031234 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.031732 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-config-data" (OuterVolumeSpecName: "config-data") pod "0074c6f2-5d03-406d-a8a3-19f87e5980d8" (UID: "0074c6f2-5d03-406d-a8a3-19f87e5980d8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.067690 4881 scope.go:117] "RemoveContainer" containerID="fc0e0f13955e8f26f942126958994aa607d18e71140c9af36d62770bcb6b8588" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.119423 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f5dpk\" (UniqueName: \"kubernetes.io/projected/edab6e89-cb03-44e6-b511-d64ba764b857-kube-api-access-f5dpk\") pod \"edab6e89-cb03-44e6-b511-d64ba764b857\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.119659 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-ovsdbserver-sb\") pod \"edab6e89-cb03-44e6-b511-d64ba764b857\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.119837 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-ovsdbserver-nb\") pod \"edab6e89-cb03-44e6-b511-d64ba764b857\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.119930 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-dns-svc\") pod \"edab6e89-cb03-44e6-b511-d64ba764b857\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.120118 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-dns-swift-storage-0\") pod \"edab6e89-cb03-44e6-b511-d64ba764b857\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.120236 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-config\") pod \"edab6e89-cb03-44e6-b511-d64ba764b857\" (UID: \"edab6e89-cb03-44e6-b511-d64ba764b857\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.120940 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0074c6f2-5d03-406d-a8a3-19f87e5980d8-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.137024 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edab6e89-cb03-44e6-b511-d64ba764b857-kube-api-access-f5dpk" (OuterVolumeSpecName: "kube-api-access-f5dpk") pod "edab6e89-cb03-44e6-b511-d64ba764b857" (UID: "edab6e89-cb03-44e6-b511-d64ba764b857"). InnerVolumeSpecName "kube-api-access-f5dpk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.228283 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f5dpk\" (UniqueName: \"kubernetes.io/projected/edab6e89-cb03-44e6-b511-d64ba764b857-kube-api-access-f5dpk\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.230953 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "edab6e89-cb03-44e6-b511-d64ba764b857" (UID: "edab6e89-cb03-44e6-b511-d64ba764b857"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.231989 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-config" (OuterVolumeSpecName: "config") pod "edab6e89-cb03-44e6-b511-d64ba764b857" (UID: "edab6e89-cb03-44e6-b511-d64ba764b857"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.239601 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "edab6e89-cb03-44e6-b511-d64ba764b857" (UID: "edab6e89-cb03-44e6-b511-d64ba764b857"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.245314 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "edab6e89-cb03-44e6-b511-d64ba764b857" (UID: "edab6e89-cb03-44e6-b511-d64ba764b857"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.247899 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "edab6e89-cb03-44e6-b511-d64ba764b857" (UID: "edab6e89-cb03-44e6-b511-d64ba764b857"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.332564 4881 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.332784 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.332855 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.332934 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.333002 4881 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edab6e89-cb03-44e6-b511-d64ba764b857-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.573358 4881 scope.go:117] "RemoveContainer" containerID="cfbaa890eb39174cd60e497974aeb6e480aa79256fdd9afb6abd5dfc43862e10" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.613765 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.670560 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.696383 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7b446c9cf4-8fv29"] Dec 11 08:38:54 crc kubenswrapper[4881]: W1211 08:38:54.703551 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7eca07f6_40f2_48b2_90fe_b5b9b332707f.slice/crio-0a8eaf7dff9fe7f1dbe85244174db7474982dc8a3f35561082263e750868419e WatchSource:0}: Error finding container 0a8eaf7dff9fe7f1dbe85244174db7474982dc8a3f35561082263e750868419e: Status 404 returned error can't find the container with id 0a8eaf7dff9fe7f1dbe85244174db7474982dc8a3f35561082263e750868419e Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.708268 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.719975 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.729638 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:38:54 crc kubenswrapper[4881]: E1211 08:38:54.730144 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerName="ceilometer-central-agent" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730158 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerName="ceilometer-central-agent" Dec 11 08:38:54 crc kubenswrapper[4881]: E1211 08:38:54.730185 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edab6e89-cb03-44e6-b511-d64ba764b857" containerName="init" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730192 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="edab6e89-cb03-44e6-b511-d64ba764b857" containerName="init" Dec 11 08:38:54 crc kubenswrapper[4881]: E1211 08:38:54.730210 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerName="sg-core" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730216 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerName="sg-core" Dec 11 08:38:54 crc kubenswrapper[4881]: E1211 08:38:54.730231 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edab6e89-cb03-44e6-b511-d64ba764b857" containerName="dnsmasq-dns" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730237 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="edab6e89-cb03-44e6-b511-d64ba764b857" containerName="dnsmasq-dns" Dec 11 08:38:54 crc kubenswrapper[4881]: E1211 08:38:54.730248 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a982ebde-b33a-4397-bd71-34da9b1efc2e" containerName="probe" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730253 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="a982ebde-b33a-4397-bd71-34da9b1efc2e" containerName="probe" Dec 11 08:38:54 crc kubenswrapper[4881]: E1211 08:38:54.730266 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a982ebde-b33a-4397-bd71-34da9b1efc2e" containerName="cinder-scheduler" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730271 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="a982ebde-b33a-4397-bd71-34da9b1efc2e" containerName="cinder-scheduler" Dec 
11 08:38:54 crc kubenswrapper[4881]: E1211 08:38:54.730290 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96a878c1-c8dc-443b-a6a2-e9e2b3833213" containerName="glance-log" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730295 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="96a878c1-c8dc-443b-a6a2-e9e2b3833213" containerName="glance-log" Dec 11 08:38:54 crc kubenswrapper[4881]: E1211 08:38:54.730303 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerName="proxy-httpd" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730310 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerName="proxy-httpd" Dec 11 08:38:54 crc kubenswrapper[4881]: E1211 08:38:54.730320 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96a878c1-c8dc-443b-a6a2-e9e2b3833213" containerName="glance-httpd" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730326 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="96a878c1-c8dc-443b-a6a2-e9e2b3833213" containerName="glance-httpd" Dec 11 08:38:54 crc kubenswrapper[4881]: E1211 08:38:54.730354 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerName="ceilometer-notification-agent" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730360 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerName="ceilometer-notification-agent" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730551 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerName="ceilometer-notification-agent" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730562 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="96a878c1-c8dc-443b-a6a2-e9e2b3833213" containerName="glance-log" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730578 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="a982ebde-b33a-4397-bd71-34da9b1efc2e" containerName="cinder-scheduler" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730589 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerName="ceilometer-central-agent" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730601 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerName="proxy-httpd" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730609 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="edab6e89-cb03-44e6-b511-d64ba764b857" containerName="dnsmasq-dns" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730620 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="a982ebde-b33a-4397-bd71-34da9b1efc2e" containerName="probe" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730632 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="96a878c1-c8dc-443b-a6a2-e9e2b3833213" containerName="glance-httpd" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.730641 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" containerName="sg-core" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.751961 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] 
Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.752083 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.768554 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.768882 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.770620 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nwtqx\" (UniqueName: \"kubernetes.io/projected/96a878c1-c8dc-443b-a6a2-e9e2b3833213-kube-api-access-nwtqx\") pod \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.770678 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.770723 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-config-data-custom\") pod \"a982ebde-b33a-4397-bd71-34da9b1efc2e\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.770765 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-config-data\") pod \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.770784 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/96a878c1-c8dc-443b-a6a2-e9e2b3833213-httpd-run\") pod \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.770810 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-combined-ca-bundle\") pod \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.770868 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a982ebde-b33a-4397-bd71-34da9b1efc2e-etc-machine-id\") pod \"a982ebde-b33a-4397-bd71-34da9b1efc2e\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.770893 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vrct\" (UniqueName: \"kubernetes.io/projected/a982ebde-b33a-4397-bd71-34da9b1efc2e-kube-api-access-4vrct\") pod \"a982ebde-b33a-4397-bd71-34da9b1efc2e\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.770930 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-combined-ca-bundle\") pod \"a982ebde-b33a-4397-bd71-34da9b1efc2e\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.770979 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/96a878c1-c8dc-443b-a6a2-e9e2b3833213-logs\") pod \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.770993 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-config-data\") pod \"a982ebde-b33a-4397-bd71-34da9b1efc2e\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.771119 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-scripts\") pod \"a982ebde-b33a-4397-bd71-34da9b1efc2e\" (UID: \"a982ebde-b33a-4397-bd71-34da9b1efc2e\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.771167 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-scripts\") pod \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.771200 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-public-tls-certs\") pod \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\" (UID: \"96a878c1-c8dc-443b-a6a2-e9e2b3833213\") " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.775862 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a982ebde-b33a-4397-bd71-34da9b1efc2e-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "a982ebde-b33a-4397-bd71-34da9b1efc2e" (UID: "a982ebde-b33a-4397-bd71-34da9b1efc2e"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.777157 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/96a878c1-c8dc-443b-a6a2-e9e2b3833213-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "96a878c1-c8dc-443b-a6a2-e9e2b3833213" (UID: "96a878c1-c8dc-443b-a6a2-e9e2b3833213"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.777385 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/96a878c1-c8dc-443b-a6a2-e9e2b3833213-logs" (OuterVolumeSpecName: "logs") pod "96a878c1-c8dc-443b-a6a2-e9e2b3833213" (UID: "96a878c1-c8dc-443b-a6a2-e9e2b3833213"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.785577 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a982ebde-b33a-4397-bd71-34da9b1efc2e-kube-api-access-4vrct" (OuterVolumeSpecName: "kube-api-access-4vrct") pod "a982ebde-b33a-4397-bd71-34da9b1efc2e" (UID: "a982ebde-b33a-4397-bd71-34da9b1efc2e"). 
InnerVolumeSpecName "kube-api-access-4vrct". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.805305 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a982ebde-b33a-4397-bd71-34da9b1efc2e" (UID: "a982ebde-b33a-4397-bd71-34da9b1efc2e"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.814646 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-scripts" (OuterVolumeSpecName: "scripts") pod "a982ebde-b33a-4397-bd71-34da9b1efc2e" (UID: "a982ebde-b33a-4397-bd71-34da9b1efc2e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.819615 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "96a878c1-c8dc-443b-a6a2-e9e2b3833213" (UID: "96a878c1-c8dc-443b-a6a2-e9e2b3833213"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.842791 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-scripts" (OuterVolumeSpecName: "scripts") pod "96a878c1-c8dc-443b-a6a2-e9e2b3833213" (UID: "96a878c1-c8dc-443b-a6a2-e9e2b3833213"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.851501 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96a878c1-c8dc-443b-a6a2-e9e2b3833213-kube-api-access-nwtqx" (OuterVolumeSpecName: "kube-api-access-nwtqx") pod "96a878c1-c8dc-443b-a6a2-e9e2b3833213" (UID: "96a878c1-c8dc-443b-a6a2-e9e2b3833213"). InnerVolumeSpecName "kube-api-access-nwtqx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.876887 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wg5jb\" (UniqueName: \"kubernetes.io/projected/64b0f79e-085c-4fa8-938d-888f5cea99e3-kube-api-access-wg5jb\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.876939 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.876962 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.876983 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64b0f79e-085c-4fa8-938d-888f5cea99e3-run-httpd\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.877016 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64b0f79e-085c-4fa8-938d-888f5cea99e3-log-httpd\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.877073 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-config-data\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.877117 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-scripts\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.877214 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nwtqx\" (UniqueName: \"kubernetes.io/projected/96a878c1-c8dc-443b-a6a2-e9e2b3833213-kube-api-access-nwtqx\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.877235 4881 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.877245 4881 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.877254 4881 
reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/96a878c1-c8dc-443b-a6a2-e9e2b3833213-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.877263 4881 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a982ebde-b33a-4397-bd71-34da9b1efc2e-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.877271 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vrct\" (UniqueName: \"kubernetes.io/projected/a982ebde-b33a-4397-bd71-34da9b1efc2e-kube-api-access-4vrct\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.877279 4881 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/96a878c1-c8dc-443b-a6a2-e9e2b3833213-logs\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.877287 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.877294 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.979019 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-scripts\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.979191 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wg5jb\" (UniqueName: \"kubernetes.io/projected/64b0f79e-085c-4fa8-938d-888f5cea99e3-kube-api-access-wg5jb\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.979218 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.979239 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.979258 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64b0f79e-085c-4fa8-938d-888f5cea99e3-run-httpd\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.979742 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64b0f79e-085c-4fa8-938d-888f5cea99e3-log-httpd\") pod \"ceilometer-0\" (UID: 
\"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.979811 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-config-data\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.980501 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64b0f79e-085c-4fa8-938d-888f5cea99e3-run-httpd\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.980995 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64b0f79e-085c-4fa8-938d-888f5cea99e3-log-httpd\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.990867 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a982ebde-b33a-4397-bd71-34da9b1efc2e","Type":"ContainerDied","Data":"949cb76d57d3383325a18e901b0307d86ea8f5c4d3187cec8e33756ef6b5c231"} Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.990957 4881 scope.go:117] "RemoveContainer" containerID="509bf974b82628f86f012c6a7b20eadef9d160c2075eb853f9a1d8dfb4857bc9" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.991106 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 11 08:38:54 crc kubenswrapper[4881]: I1211 08:38:54.991502 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.004851 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-scripts\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.005839 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-config-data\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.007446 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.007926 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.008312 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a982ebde-b33a-4397-bd71-34da9b1efc2e" (UID: "a982ebde-b33a-4397-bd71-34da9b1efc2e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.009195 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wg5jb\" (UniqueName: \"kubernetes.io/projected/64b0f79e-085c-4fa8-938d-888f5cea99e3-kube-api-access-wg5jb\") pod \"ceilometer-0\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " pod="openstack/ceilometer-0" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.013112 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.038216 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-5f6c547b6c-rjk9h" podUID="910014af-7b9e-49b8-99e3-b80a15d72faf" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.052857 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0074c6f2-5d03-406d-a8a3-19f87e5980d8" path="/var/lib/kubelet/pods/0074c6f2-5d03-406d-a8a3-19f87e5980d8/volumes" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.076549 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-5f6c547b6c-rjk9h" podStartSLOduration=16.076526712 podStartE2EDuration="16.076526712s" podCreationTimestamp="2025-12-11 08:38:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:38:55.041951929 +0000 UTC m=+1383.419320636" watchObservedRunningTime="2025-12-11 08:38:55.076526712 +0000 UTC m=+1383.453895409" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.082202 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.148526 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "96a878c1-c8dc-443b-a6a2-e9e2b3833213" (UID: "96a878c1-c8dc-443b-a6a2-e9e2b3833213"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.161225 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-config-data" (OuterVolumeSpecName: "config-data") pod "96a878c1-c8dc-443b-a6a2-e9e2b3833213" (UID: "96a878c1-c8dc-443b-a6a2-e9e2b3833213"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.182210 4881 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.185599 4881 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.185622 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.185633 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.207189 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "96a878c1-c8dc-443b-a6a2-e9e2b3833213" (UID: "96a878c1-c8dc-443b-a6a2-e9e2b3833213"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.288067 4881 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/96a878c1-c8dc-443b-a6a2-e9e2b3833213-public-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.313601 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-config-data" (OuterVolumeSpecName: "config-data") pod "a982ebde-b33a-4397-bd71-34da9b1efc2e" (UID: "a982ebde-b33a-4397-bd71-34da9b1efc2e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.391951 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a982ebde-b33a-4397-bd71-34da9b1efc2e-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.483361 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" event={"ID":"edab6e89-cb03-44e6-b511-d64ba764b857","Type":"ContainerDied","Data":"141f11d310bb26aad6c2c23dbddcbdf3591e6f650607cdee70bf1a32088569b3"} Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.483416 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.483436 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.483445 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"96a878c1-c8dc-443b-a6a2-e9e2b3833213","Type":"ContainerDied","Data":"a9e31d49fbe0f2ec3596339c2ec3a43ca591201bb128abf3fd18d46382f6de51"} Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.483467 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-d56bd9469-fm4xb"] Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.483484 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5f6c547b6c-rjk9h" event={"ID":"910014af-7b9e-49b8-99e3-b80a15d72faf","Type":"ContainerStarted","Data":"25445b7cd523ef2a21ec75b741235f656d88dd50b53fe58cb7cc7547bff2acbd"} Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.483494 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7b446c9cf4-8fv29" event={"ID":"7eca07f6-40f2-48b2-90fe-b5b9b332707f","Type":"ContainerStarted","Data":"0a8eaf7dff9fe7f1dbe85244174db7474982dc8a3f35561082263e750868419e"} Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.483531 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-557bc7cb47-8hntq" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.561529 4881 scope.go:117] "RemoveContainer" containerID="945304d81117ec5b4b769570dff0eab198d9721114145fd8e56e910043d30a80" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.612650 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5497987457-dkncd" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.613942 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.647584 4881 scope.go:117] "RemoveContainer" containerID="f5af75019d1bfcfe57fda0fa3426711b5e30dbf674c78914eeb3c9a13b15e860" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.716809 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-config-data\") pod \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\" (UID: \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\") " Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.716899 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-combined-ca-bundle\") pod \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\" (UID: \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\") " Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.717034 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xqhl8\" (UniqueName: \"kubernetes.io/projected/020cd7f3-faa3-4bec-adfd-25a9b60456f7-kube-api-access-xqhl8\") pod \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\" (UID: \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\") " Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.717223 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-config-data-custom\") pod \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\" (UID: \"020cd7f3-faa3-4bec-adfd-25a9b60456f7\") " Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.747177 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "020cd7f3-faa3-4bec-adfd-25a9b60456f7" (UID: "020cd7f3-faa3-4bec-adfd-25a9b60456f7"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.749488 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "020cd7f3-faa3-4bec-adfd-25a9b60456f7" (UID: "020cd7f3-faa3-4bec-adfd-25a9b60456f7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.749568 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.749776 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/020cd7f3-faa3-4bec-adfd-25a9b60456f7-kube-api-access-xqhl8" (OuterVolumeSpecName: "kube-api-access-xqhl8") pod "020cd7f3-faa3-4bec-adfd-25a9b60456f7" (UID: "020cd7f3-faa3-4bec-adfd-25a9b60456f7"). InnerVolumeSpecName "kube-api-access-xqhl8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.803075 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-config-data" (OuterVolumeSpecName: "config-data") pod "020cd7f3-faa3-4bec-adfd-25a9b60456f7" (UID: "020cd7f3-faa3-4bec-adfd-25a9b60456f7"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.817652 4881 scope.go:117] "RemoveContainer" containerID="eba0ce956a1aac2cf1a8ee1e04ac0ea51d7e0612e1999ee09dbf2f2cb3531029" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.819567 4881 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.819589 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.819599 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/020cd7f3-faa3-4bec-adfd-25a9b60456f7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.819610 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xqhl8\" (UniqueName: \"kubernetes.io/projected/020cd7f3-faa3-4bec-adfd-25a9b60456f7-kube-api-access-xqhl8\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.825610 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.854703 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.857871 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.861127 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.907048 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.951386 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03bd1873-0976-4da1-a4f0-4bc1ab183cda-logs\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.951629 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfg6r\" (UniqueName: \"kubernetes.io/projected/03bd1873-0976-4da1-a4f0-4bc1ab183cda-kube-api-access-tfg6r\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.951747 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/03bd1873-0976-4da1-a4f0-4bc1ab183cda-scripts\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.951792 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03bd1873-0976-4da1-a4f0-4bc1ab183cda-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.951923 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.951957 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03bd1873-0976-4da1-a4f0-4bc1ab183cda-config-data\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.951994 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/03bd1873-0976-4da1-a4f0-4bc1ab183cda-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:55 crc kubenswrapper[4881]: I1211 08:38:55.952131 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/03bd1873-0976-4da1-a4f0-4bc1ab183cda-public-tls-certs\") pod \"glance-default-external-api-0\" 
(UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.061825 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.100995 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/03bd1873-0976-4da1-a4f0-4bc1ab183cda-scripts\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.101076 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03bd1873-0976-4da1-a4f0-4bc1ab183cda-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.102715 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.103132 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03bd1873-0976-4da1-a4f0-4bc1ab183cda-config-data\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.103190 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/03bd1873-0976-4da1-a4f0-4bc1ab183cda-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.104128 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/03bd1873-0976-4da1-a4f0-4bc1ab183cda-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.104249 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03bd1873-0976-4da1-a4f0-4bc1ab183cda-logs\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.104551 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfg6r\" (UniqueName: \"kubernetes.io/projected/03bd1873-0976-4da1-a4f0-4bc1ab183cda-kube-api-access-tfg6r\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.105083 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.106830 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/03bd1873-0976-4da1-a4f0-4bc1ab183cda-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.113961 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03bd1873-0976-4da1-a4f0-4bc1ab183cda-logs\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.125797 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/03bd1873-0976-4da1-a4f0-4bc1ab183cda-scripts\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.126154 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03bd1873-0976-4da1-a4f0-4bc1ab183cda-config-data\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.126820 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03bd1873-0976-4da1-a4f0-4bc1ab183cda-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.133298 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/03bd1873-0976-4da1-a4f0-4bc1ab183cda-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: W1211 08:38:56.133988 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod98c33c2d_b3e5_450d_8c52_544acac89c74.slice/crio-fb7ba9fb71c6e24601cdd4f6c5a37171e2790445c7f438e4b9b7460575d34234 WatchSource:0}: Error finding container fb7ba9fb71c6e24601cdd4f6c5a37171e2790445c7f438e4b9b7460575d34234: Status 404 returned error can't find the container with id fb7ba9fb71c6e24601cdd4f6c5a37171e2790445c7f438e4b9b7460575d34234 Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.144809 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfg6r\" (UniqueName: \"kubernetes.io/projected/03bd1873-0976-4da1-a4f0-4bc1ab183cda-kube-api-access-tfg6r\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.148287 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/heat-engine-d56bd9469-fm4xb" event={"ID":"8f8f4fc0-e759-433b-835a-f2c0db79850f","Type":"ContainerStarted","Data":"390d424708a7d4a7dfb5154fd7148ee8b9df67199b2143b9fc78c1855fea8cd7"} Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.153881 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-86c494669f-2l5s8" event={"ID":"773654f0-c183-4962-a942-9fc33e9f44a7","Type":"ContainerStarted","Data":"81dc2cfd55617d4d9e76b59ccc9623e6a6126c1f8084fa4b757052cfb6fa1dc0"} Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.154047 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-86c494669f-2l5s8" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.154164 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-86c494669f-2l5s8" podUID="773654f0-c183-4962-a942-9fc33e9f44a7" containerName="heat-cfnapi" containerID="cri-o://81dc2cfd55617d4d9e76b59ccc9623e6a6126c1f8084fa4b757052cfb6fa1dc0" gracePeriod=60 Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.158286 4881 scope.go:117] "RemoveContainer" containerID="9f2b61792cd2e899dae69b7bf99ab2a846b2cf8cd5c7bc92f5683c5e3af317f1" Dec 11 08:38:56 crc kubenswrapper[4881]: E1211 08:38:56.159815 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eba0ce956a1aac2cf1a8ee1e04ac0ea51d7e0612e1999ee09dbf2f2cb3531029\": container with ID starting with eba0ce956a1aac2cf1a8ee1e04ac0ea51d7e0612e1999ee09dbf2f2cb3531029 not found: ID does not exist" containerID="eba0ce956a1aac2cf1a8ee1e04ac0ea51d7e0612e1999ee09dbf2f2cb3531029" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.170848 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5497987457-dkncd" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.172446 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5497987457-dkncd" event={"ID":"020cd7f3-faa3-4bec-adfd-25a9b60456f7","Type":"ContainerDied","Data":"53319dd70f9021440e509374c4dc13c0ce9ba56b8ec0ac5a0c70301ff8500648"} Dec 11 08:38:56 crc kubenswrapper[4881]: W1211 08:38:56.192995 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a116558_2db5_4fe1_9f64_888d7cd93f57.slice/crio-6ed69b084e2e31f9eeb1d2b5bd0dfd9c8412d1840fad56f2751aa22cfafa3edb WatchSource:0}: Error finding container 6ed69b084e2e31f9eeb1d2b5bd0dfd9c8412d1840fad56f2751aa22cfafa3edb: Status 404 returned error can't find the container with id 6ed69b084e2e31f9eeb1d2b5bd0dfd9c8412d1840fad56f2751aa22cfafa3edb Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.200213 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.217746 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-5f6c547b6c-rjk9h" podUID="910014af-7b9e-49b8-99e3-b80a15d72faf" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 11 08:38:56 crc kubenswrapper[4881]: W1211 08:38:56.218500 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd5902bc2_d738_4bab_9b99_78b827e3a003.slice/crio-3574c6b82044712c60ab82683216ef5f4ba20d451e77f5bc9f96f405bbe75a74 WatchSource:0}: Error finding container 3574c6b82044712c60ab82683216ef5f4ba20d451e77f5bc9f96f405bbe75a74: Status 404 returned error can't find the container with id 3574c6b82044712c60ab82683216ef5f4ba20d451e77f5bc9f96f405bbe75a74 Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.249824 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.260097 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.290535 4881 scope.go:117] "RemoveContainer" containerID="4468e46bc7e99e4bd0e2b6b2c10187478a1315d51fac1b2b9e5dec3b360d5c8f" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.313026 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fb741205-ce03-4c1d-9181-b2efc3c92319-httpd-run\") pod \"fb741205-ce03-4c1d-9181-b2efc3c92319\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.313141 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"fb741205-ce03-4c1d-9181-b2efc3c92319\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.313166 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-scripts\") pod \"fb741205-ce03-4c1d-9181-b2efc3c92319\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.313237 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-config-data\") pod \"fb741205-ce03-4c1d-9181-b2efc3c92319\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.313368 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-combined-ca-bundle\") pod \"fb741205-ce03-4c1d-9181-b2efc3c92319\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.313400 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb741205-ce03-4c1d-9181-b2efc3c92319-logs\") pod \"fb741205-ce03-4c1d-9181-b2efc3c92319\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.313432 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fbd9\" (UniqueName: \"kubernetes.io/projected/fb741205-ce03-4c1d-9181-b2efc3c92319-kube-api-access-9fbd9\") pod \"fb741205-ce03-4c1d-9181-b2efc3c92319\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.313516 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-internal-tls-certs\") pod \"fb741205-ce03-4c1d-9181-b2efc3c92319\" (UID: \"fb741205-ce03-4c1d-9181-b2efc3c92319\") " Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.323850 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb741205-ce03-4c1d-9181-b2efc3c92319-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "fb741205-ce03-4c1d-9181-b2efc3c92319" (UID: "fb741205-ce03-4c1d-9181-b2efc3c92319"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.325053 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb741205-ce03-4c1d-9181-b2efc3c92319-logs" (OuterVolumeSpecName: "logs") pod "fb741205-ce03-4c1d-9181-b2efc3c92319" (UID: "fb741205-ce03-4c1d-9181-b2efc3c92319"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.340120 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Dec 11 08:38:56 crc kubenswrapper[4881]: E1211 08:38:56.341406 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb741205-ce03-4c1d-9181-b2efc3c92319" containerName="glance-httpd" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.341430 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb741205-ce03-4c1d-9181-b2efc3c92319" containerName="glance-httpd" Dec 11 08:38:56 crc kubenswrapper[4881]: E1211 08:38:56.341456 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb741205-ce03-4c1d-9181-b2efc3c92319" containerName="glance-log" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.341463 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb741205-ce03-4c1d-9181-b2efc3c92319" containerName="glance-log" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.341832 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb741205-ce03-4c1d-9181-b2efc3c92319" containerName="glance-log" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.341853 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb741205-ce03-4c1d-9181-b2efc3c92319" containerName="glance-httpd" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.342436 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-scripts" (OuterVolumeSpecName: "scripts") pod "fb741205-ce03-4c1d-9181-b2efc3c92319" (UID: "fb741205-ce03-4c1d-9181-b2efc3c92319"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.345838 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.353955 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "fb741205-ce03-4c1d-9181-b2efc3c92319" (UID: "fb741205-ce03-4c1d-9181-b2efc3c92319"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.359075 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.360089 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.360650 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb741205-ce03-4c1d-9181-b2efc3c92319-kube-api-access-9fbd9" (OuterVolumeSpecName: "kube-api-access-9fbd9") pod "fb741205-ce03-4c1d-9181-b2efc3c92319" (UID: "fb741205-ce03-4c1d-9181-b2efc3c92319"). InnerVolumeSpecName "kube-api-access-9fbd9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.417292 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9aa88e0-71a6-40a0-92ec-88084b425df9-config-data\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.417419 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56v92\" (UniqueName: \"kubernetes.io/projected/e9aa88e0-71a6-40a0-92ec-88084b425df9-kube-api-access-56v92\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.417705 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e9aa88e0-71a6-40a0-92ec-88084b425df9-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.417745 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e9aa88e0-71a6-40a0-92ec-88084b425df9-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.417765 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9aa88e0-71a6-40a0-92ec-88084b425df9-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.417894 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9aa88e0-71a6-40a0-92ec-88084b425df9-scripts\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.418111 4881 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fb741205-ce03-4c1d-9181-b2efc3c92319-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.418138 4881 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.418155 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.418166 4881 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb741205-ce03-4c1d-9181-b2efc3c92319-logs\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.418178 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fbd9\" (UniqueName: 
\"kubernetes.io/projected/fb741205-ce03-4c1d-9181-b2efc3c92319-kube-api-access-9fbd9\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.422913 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"03bd1873-0976-4da1-a4f0-4bc1ab183cda\") " pod="openstack/glance-default-external-api-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.464276 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-579694df8d-vpn5m"] Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.473733 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-b5858866f-srtbn"] Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.520883 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9aa88e0-71a6-40a0-92ec-88084b425df9-scripts\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.521190 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9aa88e0-71a6-40a0-92ec-88084b425df9-config-data\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.521361 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56v92\" (UniqueName: \"kubernetes.io/projected/e9aa88e0-71a6-40a0-92ec-88084b425df9-kube-api-access-56v92\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.521577 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e9aa88e0-71a6-40a0-92ec-88084b425df9-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.521683 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e9aa88e0-71a6-40a0-92ec-88084b425df9-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.521872 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9aa88e0-71a6-40a0-92ec-88084b425df9-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.532833 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e9aa88e0-71a6-40a0-92ec-88084b425df9-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0" Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.532896 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 11 08:38:56 crc 
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.534793 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.553508 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9aa88e0-71a6-40a0-92ec-88084b425df9-scripts\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0"
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.560430 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fb741205-ce03-4c1d-9181-b2efc3c92319" (UID: "fb741205-ce03-4c1d-9181-b2efc3c92319"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.561364 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9aa88e0-71a6-40a0-92ec-88084b425df9-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0"
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.563307 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-gs9fw"]
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.569982 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56v92\" (UniqueName: \"kubernetes.io/projected/e9aa88e0-71a6-40a0-92ec-88084b425df9-kube-api-access-56v92\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0"
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.581011 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-bd877888f-jfg9m"]
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.598079 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e9aa88e0-71a6-40a0-92ec-88084b425df9-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0"
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.609926 4881 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc"
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.616131 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-lhqk7"]
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.619775 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9aa88e0-71a6-40a0-92ec-88084b425df9-config-data\") pod \"cinder-scheduler-0\" (UID: \"e9aa88e0-71a6-40a0-92ec-88084b425df9\") " pod="openstack/cinder-scheduler-0"
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.625171 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-86c494669f-2l5s8" podStartSLOduration=4.756662332 podStartE2EDuration="21.625130671s" podCreationTimestamp="2025-12-11 08:38:35 +0000 UTC" firstStartedPulling="2025-12-11 08:38:36.496542465 +0000 UTC m=+1364.873911162" lastFinishedPulling="2025-12-11 08:38:53.365010804 +0000 UTC m=+1381.742379501" observedRunningTime="2025-12-11 08:38:56.186945762 +0000 UTC m=+1384.564314459" watchObservedRunningTime="2025-12-11 08:38:56.625130671 +0000 UTC m=+1385.002499378"
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.643194 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.643249 4881 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\""
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.737349 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-bjwpq"]
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.751867 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-config-data" (OuterVolumeSpecName: "config-data") pod "fb741205-ce03-4c1d-9181-b2efc3c92319" (UID: "fb741205-ce03-4c1d-9181-b2efc3c92319"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.772229 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5497987457-dkncd"]
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.792327 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "fb741205-ce03-4c1d-9181-b2efc3c92319" (UID: "fb741205-ce03-4c1d-9181-b2efc3c92319"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.793999 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-5497987457-dkncd"]
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.848435 4881 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.848488 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb741205-ce03-4c1d-9181-b2efc3c92319-config-data\") on node \"crc\" DevicePath \"\""
Dec 11 08:38:56 crc kubenswrapper[4881]: I1211 08:38:56.887430 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.041413 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="020cd7f3-faa3-4bec-adfd-25a9b60456f7" path="/var/lib/kubelet/pods/020cd7f3-faa3-4bec-adfd-25a9b60456f7/volumes"
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.042593 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96a878c1-c8dc-443b-a6a2-e9e2b3833213" path="/var/lib/kubelet/pods/96a878c1-c8dc-443b-a6a2-e9e2b3833213/volumes"
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.043420 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a982ebde-b33a-4397-bd71-34da9b1efc2e" path="/var/lib/kubelet/pods/a982ebde-b33a-4397-bd71-34da9b1efc2e/volumes"
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.145668 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.231038 4881 generic.go:334] "Generic (PLEG): container finished" podID="773654f0-c183-4962-a942-9fc33e9f44a7" containerID="81dc2cfd55617d4d9e76b59ccc9623e6a6126c1f8084fa4b757052cfb6fa1dc0" exitCode=0
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.231159 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-86c494669f-2l5s8" event={"ID":"773654f0-c183-4962-a942-9fc33e9f44a7","Type":"ContainerDied","Data":"81dc2cfd55617d4d9e76b59ccc9623e6a6126c1f8084fa4b757052cfb6fa1dc0"}
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.232846 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"98c33c2d-b3e5-450d-8c52-544acac89c74","Type":"ContainerStarted","Data":"fb7ba9fb71c6e24601cdd4f6c5a37171e2790445c7f438e4b9b7460575d34234"}
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.262902 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-579694df8d-vpn5m" event={"ID":"8bc9eb50-582d-468f-8286-f9a0d3c1def4","Type":"ContainerStarted","Data":"c2a925a563afd8b45732a44ef64c5c31aac3671db53e2c458d5f1dea0e099ec5"}
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.285381 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64b0f79e-085c-4fa8-938d-888f5cea99e3","Type":"ContainerStarted","Data":"7bbd26efbd176f2208957fed209f6924b2f9c33702be98e4e95afe963b7e1585"}
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.312205 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-d56bd9469-fm4xb" event={"ID":"8f8f4fc0-e759-433b-835a-f2c0db79850f","Type":"ContainerStarted","Data":"9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa"}
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.312652 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-d56bd9469-fm4xb"
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.348420 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-b5858866f-srtbn" event={"ID":"a466820d-9c8f-4aa5-8e39-485b6212a154","Type":"ContainerStarted","Data":"8916d11191fdd602bde7332b6da597dddfe517a05db2f9e9152321b82a08085b"}
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.350312 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-lhqk7" event={"ID":"0152813f-e688-49b2-88d2-afba5096bd0e","Type":"ContainerStarted","Data":"b3e71eb02cfb0d4c62059d5327fa1b467f8bbe7b770bff4b561d5ed641709cfc"}
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.354883 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-d56bd9469-fm4xb" podStartSLOduration=14.354861742 podStartE2EDuration="14.354861742s" podCreationTimestamp="2025-12-11 08:38:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:38:57.339557396 +0000 UTC m=+1385.716926093" watchObservedRunningTime="2025-12-11 08:38:57.354861742 +0000 UTC m=+1385.732230439"
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.355598 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-bd877888f-jfg9m" event={"ID":"d5902bc2-d738-4bab-9b99-78b827e3a003","Type":"ContainerStarted","Data":"3574c6b82044712c60ab82683216ef5f4ba20d451e77f5bc9f96f405bbe75a74"}
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.359687 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-gs9fw" event={"ID":"7a116558-2db5-4fe1-9f64-888d7cd93f57","Type":"ContainerStarted","Data":"6ed69b084e2e31f9eeb1d2b5bd0dfd9c8412d1840fad56f2751aa22cfafa3edb"}
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.387556 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-bjwpq" event={"ID":"556e7646-0cc0-4686-875f-6738267e467e","Type":"ContainerStarted","Data":"ef5f60aac6b94e2e83c51a1c46d43c2cf51fb69865d2ac2ac89dff153059ef35"}
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.495371 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fb741205-ce03-4c1d-9181-b2efc3c92319","Type":"ContainerDied","Data":"fe9b40206aeabc8792f161749c582ed8e2de3507772f4fd9e5b141bfae06ac29"}
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.495436 4881 scope.go:117] "RemoveContainer" containerID="e63123dac3e0c7900898c6e9b6eb12295de5e928c5abc16df766d433bbaf73cb"
Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.495597 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:57 crc kubenswrapper[4881]: I1211 08:38:57.714782 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.013858 4881 scope.go:117] "RemoveContainer" containerID="c4006b0e738d45d323e12ce6001a2110fff8d26a20bbcb970da9a399dd8e3e62" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.020575 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-86c494669f-2l5s8" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.098179 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-config-data-custom\") pod \"773654f0-c183-4962-a942-9fc33e9f44a7\" (UID: \"773654f0-c183-4962-a942-9fc33e9f44a7\") " Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.098236 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-config-data\") pod \"773654f0-c183-4962-a942-9fc33e9f44a7\" (UID: \"773654f0-c183-4962-a942-9fc33e9f44a7\") " Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.098385 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-combined-ca-bundle\") pod \"773654f0-c183-4962-a942-9fc33e9f44a7\" (UID: \"773654f0-c183-4962-a942-9fc33e9f44a7\") " Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.098580 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bkzqs\" (UniqueName: \"kubernetes.io/projected/773654f0-c183-4962-a942-9fc33e9f44a7-kube-api-access-bkzqs\") pod \"773654f0-c183-4962-a942-9fc33e9f44a7\" (UID: \"773654f0-c183-4962-a942-9fc33e9f44a7\") " Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.110286 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "773654f0-c183-4962-a942-9fc33e9f44a7" (UID: "773654f0-c183-4962-a942-9fc33e9f44a7"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.117505 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/773654f0-c183-4962-a942-9fc33e9f44a7-kube-api-access-bkzqs" (OuterVolumeSpecName: "kube-api-access-bkzqs") pod "773654f0-c183-4962-a942-9fc33e9f44a7" (UID: "773654f0-c183-4962-a942-9fc33e9f44a7"). InnerVolumeSpecName "kube-api-access-bkzqs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.202518 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bkzqs\" (UniqueName: \"kubernetes.io/projected/773654f0-c183-4962-a942-9fc33e9f44a7-kube-api-access-bkzqs\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.202554 4881 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.315715 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "773654f0-c183-4962-a942-9fc33e9f44a7" (UID: "773654f0-c183-4962-a942-9fc33e9f44a7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.364746 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-config-data" (OuterVolumeSpecName: "config-data") pod "773654f0-c183-4962-a942-9fc33e9f44a7" (UID: "773654f0-c183-4962-a942-9fc33e9f44a7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.407646 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.407685 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/773654f0-c183-4962-a942-9fc33e9f44a7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.541279 4881 generic.go:334] "Generic (PLEG): container finished" podID="7a116558-2db5-4fe1-9f64-888d7cd93f57" containerID="45fc249c3889799d7ee4d73940d28ed200dae5bc2594643f31c5bdfb3bc35802" exitCode=0 Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.541494 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-gs9fw" event={"ID":"7a116558-2db5-4fe1-9f64-888d7cd93f57","Type":"ContainerDied","Data":"45fc249c3889799d7ee4d73940d28ed200dae5bc2594643f31c5bdfb3bc35802"} Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.551019 4881 generic.go:334] "Generic (PLEG): container finished" podID="556e7646-0cc0-4686-875f-6738267e467e" containerID="47469da148bd551d6a297aec48b47e90f615ba6d1655f095fdb9f1d26bb23515" exitCode=0 Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.551127 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-bjwpq" event={"ID":"556e7646-0cc0-4686-875f-6738267e467e","Type":"ContainerDied","Data":"47469da148bd551d6a297aec48b47e90f615ba6d1655f095fdb9f1d26bb23515"} Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.574688 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-579694df8d-vpn5m" event={"ID":"8bc9eb50-582d-468f-8286-f9a0d3c1def4","Type":"ContainerStarted","Data":"901ce1a433f29c0b30a0d9382e385ecab56a34197594778ae7cfdc2e92ed9955"} Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.575385 4881 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-579694df8d-vpn5m" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.582638 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.593534 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.598000 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7b446c9cf4-8fv29" event={"ID":"7eca07f6-40f2-48b2-90fe-b5b9b332707f","Type":"ContainerStarted","Data":"5eb2d57a6beb3080b599766ec050979d3787cb1957f59bbbfd2cbe695e44ed38"} Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.598360 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.618132 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"03bd1873-0976-4da1-a4f0-4bc1ab183cda","Type":"ContainerStarted","Data":"66d7b2b51f1545f304ef094dd875c2897770829af4c9fa23f8cbd8d3188af30f"} Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.629146 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 08:38:59 crc kubenswrapper[4881]: E1211 08:38:58.630461 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="773654f0-c183-4962-a942-9fc33e9f44a7" containerName="heat-cfnapi" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.630483 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="773654f0-c183-4962-a942-9fc33e9f44a7" containerName="heat-cfnapi" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.630793 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="773654f0-c183-4962-a942-9fc33e9f44a7" containerName="heat-cfnapi" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.632824 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.642479 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.642780 4881 generic.go:334] "Generic (PLEG): container finished" podID="d5902bc2-d738-4bab-9b99-78b827e3a003" containerID="f5dd2c135a68c085a36bb2ef21bc30803ab55188615f2e753a0a36f3275d6af8" exitCode=1 Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.642856 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-bd877888f-jfg9m" event={"ID":"d5902bc2-d738-4bab-9b99-78b827e3a003","Type":"ContainerDied","Data":"f5dd2c135a68c085a36bb2ef21bc30803ab55188615f2e753a0a36f3275d6af8"} Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.643603 4881 scope.go:117] "RemoveContainer" containerID="f5dd2c135a68c085a36bb2ef21bc30803ab55188615f2e753a0a36f3275d6af8" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.644086 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.648090 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.666513 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-86c494669f-2l5s8" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.667064 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-86c494669f-2l5s8" event={"ID":"773654f0-c183-4962-a942-9fc33e9f44a7","Type":"ContainerDied","Data":"40f52ed3f2d0d999b613255b598e85ac711267670f850cfd8794876c5620bb28"} Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.667109 4881 scope.go:117] "RemoveContainer" containerID="81dc2cfd55617d4d9e76b59ccc9623e6a6126c1f8084fa4b757052cfb6fa1dc0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.699880 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-579694df8d-vpn5m" podStartSLOduration=13.699820418 podStartE2EDuration="13.699820418s" podCreationTimestamp="2025-12-11 08:38:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:38:58.62417677 +0000 UTC m=+1387.001545467" watchObservedRunningTime="2025-12-11 08:38:58.699820418 +0000 UTC m=+1387.077189105" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.715157 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-7b446c9cf4-8fv29" podStartSLOduration=14.872954508 podStartE2EDuration="15.715137394s" podCreationTimestamp="2025-12-11 08:38:43 +0000 UTC" firstStartedPulling="2025-12-11 08:38:54.741584546 +0000 UTC m=+1383.118953243" lastFinishedPulling="2025-12-11 08:38:55.583767432 +0000 UTC m=+1383.961136129" observedRunningTime="2025-12-11 08:38:58.659020299 +0000 UTC m=+1387.036388996" watchObservedRunningTime="2025-12-11 08:38:58.715137394 +0000 UTC m=+1387.092506091" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.715243 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4cde142d-aa3a-4c3b-9e63-efcbce032089-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.715304 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4cde142d-aa3a-4c3b-9e63-efcbce032089-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.715386 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.715456 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcr8w\" (UniqueName: \"kubernetes.io/projected/4cde142d-aa3a-4c3b-9e63-efcbce032089-kube-api-access-zcr8w\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.715519 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/4cde142d-aa3a-4c3b-9e63-efcbce032089-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.715635 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4cde142d-aa3a-4c3b-9e63-efcbce032089-logs\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.715774 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cde142d-aa3a-4c3b-9e63-efcbce032089-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.716027 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4cde142d-aa3a-4c3b-9e63-efcbce032089-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.782354 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-86c494669f-2l5s8"] Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.820374 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-86c494669f-2l5s8"] Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.825933 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4cde142d-aa3a-4c3b-9e63-efcbce032089-logs\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.825991 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cde142d-aa3a-4c3b-9e63-efcbce032089-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.826027 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4cde142d-aa3a-4c3b-9e63-efcbce032089-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.826088 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4cde142d-aa3a-4c3b-9e63-efcbce032089-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.826125 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4cde142d-aa3a-4c3b-9e63-efcbce032089-config-data\") pod 
\"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.826147 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.826171 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcr8w\" (UniqueName: \"kubernetes.io/projected/4cde142d-aa3a-4c3b-9e63-efcbce032089-kube-api-access-zcr8w\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.826206 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4cde142d-aa3a-4c3b-9e63-efcbce032089-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.827808 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.832751 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4cde142d-aa3a-4c3b-9e63-efcbce032089-logs\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.833376 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4cde142d-aa3a-4c3b-9e63-efcbce032089-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.840666 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4cde142d-aa3a-4c3b-9e63-efcbce032089-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.841988 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4cde142d-aa3a-4c3b-9e63-efcbce032089-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.842668 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cde142d-aa3a-4c3b-9e63-efcbce032089-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " 
pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.867126 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4cde142d-aa3a-4c3b-9e63-efcbce032089-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.877874 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.877940 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:58.880179 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcr8w\" (UniqueName: \"kubernetes.io/projected/4cde142d-aa3a-4c3b-9e63-efcbce032089-kube-api-access-zcr8w\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.061574 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="773654f0-c183-4962-a942-9fc33e9f44a7" path="/var/lib/kubelet/pods/773654f0-c183-4962-a942-9fc33e9f44a7/volumes" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.063386 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb741205-ce03-4c1d-9181-b2efc3c92319" path="/var/lib/kubelet/pods/fb741205-ce03-4c1d-9181-b2efc3c92319/volumes" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.085413 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"4cde142d-aa3a-4c3b-9e63-efcbce032089\") " pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.282662 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.345129 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.399143 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.399210 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.436300 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.436941 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5f6c547b6c-rjk9h" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.726624 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-bd877888f-jfg9m" event={"ID":"d5902bc2-d738-4bab-9b99-78b827e3a003","Type":"ContainerStarted","Data":"389b55b4a7ab2a313d39200970c6b44deb752f5196fe1fed6bf750a53a364364"} Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.728777 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.758627 4881 generic.go:334] "Generic (PLEG): container finished" podID="0152813f-e688-49b2-88d2-afba5096bd0e" containerID="c2b6075393d1d2f9e783c96ac6f361d451182813df29b4c7eee0930a516eb222" exitCode=0 Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.758755 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-lhqk7" event={"ID":"0152813f-e688-49b2-88d2-afba5096bd0e","Type":"ContainerDied","Data":"c2b6075393d1d2f9e783c96ac6f361d451182813df29b4c7eee0930a516eb222"} Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.758840 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-bd877888f-jfg9m" podStartSLOduration=16.758819961 podStartE2EDuration="16.758819961s" podCreationTimestamp="2025-12-11 08:38:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:38:59.746761158 +0000 UTC m=+1388.124129855" watchObservedRunningTime="2025-12-11 08:38:59.758819961 +0000 UTC m=+1388.136188648" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.802324 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"98c33c2d-b3e5-450d-8c52-544acac89c74","Type":"ContainerStarted","Data":"5e9c1b5a1bc4c22e9866d0b4c64c173b0c01868cf9daf4851fc42cc401926348"} Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.808692 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"e9aa88e0-71a6-40a0-92ec-88084b425df9","Type":"ContainerStarted","Data":"bb339eb387d9ce467737ac95d3172080e28ac327e5f5da90cc6a7d4bda2f7016"} Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.840196 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-b5858866f-srtbn" event={"ID":"a466820d-9c8f-4aa5-8e39-485b6212a154","Type":"ContainerStarted","Data":"83689b425d023b54b62be213c3d9fa1dd9c5fc9184664aa22970bbd9dd3b05c0"} Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.841603 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-b5858866f-srtbn" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.864663 4881 generic.go:334] "Generic (PLEG): container finished" podID="7eca07f6-40f2-48b2-90fe-b5b9b332707f" containerID="5eb2d57a6beb3080b599766ec050979d3787cb1957f59bbbfd2cbe695e44ed38" exitCode=1 Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.864748 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7b446c9cf4-8fv29" event={"ID":"7eca07f6-40f2-48b2-90fe-b5b9b332707f","Type":"ContainerDied","Data":"5eb2d57a6beb3080b599766ec050979d3787cb1957f59bbbfd2cbe695e44ed38"} Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.865533 4881 scope.go:117] "RemoveContainer" containerID="5eb2d57a6beb3080b599766ec050979d3787cb1957f59bbbfd2cbe695e44ed38" Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.877587 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64b0f79e-085c-4fa8-938d-888f5cea99e3","Type":"ContainerStarted","Data":"3cb6b7be631f37f0178f2d8f98e6344a0b84c9d7af40f878476956b2833efae4"} Dec 11 08:38:59 crc kubenswrapper[4881]: I1211 08:38:59.886875 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-b5858866f-srtbn" podStartSLOduration=14.88685463 podStartE2EDuration="14.88685463s" podCreationTimestamp="2025-12-11 08:38:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:38:59.870461997 +0000 UTC m=+1388.247830704" watchObservedRunningTime="2025-12-11 08:38:59.88685463 +0000 UTC m=+1388.264223327" Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.240431 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.733587 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-gs9fw" Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.773660 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-bjwpq" Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.844702 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tgkxg\" (UniqueName: \"kubernetes.io/projected/556e7646-0cc0-4686-875f-6738267e467e-kube-api-access-tgkxg\") pod \"556e7646-0cc0-4686-875f-6738267e467e\" (UID: \"556e7646-0cc0-4686-875f-6738267e467e\") " Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.844953 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wz97r\" (UniqueName: \"kubernetes.io/projected/7a116558-2db5-4fe1-9f64-888d7cd93f57-kube-api-access-wz97r\") pod \"7a116558-2db5-4fe1-9f64-888d7cd93f57\" (UID: \"7a116558-2db5-4fe1-9f64-888d7cd93f57\") " Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.873385 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/556e7646-0cc0-4686-875f-6738267e467e-kube-api-access-tgkxg" (OuterVolumeSpecName: "kube-api-access-tgkxg") pod "556e7646-0cc0-4686-875f-6738267e467e" (UID: "556e7646-0cc0-4686-875f-6738267e467e"). InnerVolumeSpecName "kube-api-access-tgkxg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.876629 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a116558-2db5-4fe1-9f64-888d7cd93f57-kube-api-access-wz97r" (OuterVolumeSpecName: "kube-api-access-wz97r") pod "7a116558-2db5-4fe1-9f64-888d7cd93f57" (UID: "7a116558-2db5-4fe1-9f64-888d7cd93f57"). InnerVolumeSpecName "kube-api-access-wz97r". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.900887 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4cde142d-aa3a-4c3b-9e63-efcbce032089","Type":"ContainerStarted","Data":"b7752a04587beb434d76b20f0a6ba21c351f9bccb2402098010834897f088dba"} Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.906649 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"03bd1873-0976-4da1-a4f0-4bc1ab183cda","Type":"ContainerStarted","Data":"71d65a1c8b092a803992eb6704a1d1fa506990077d196f5c475dda09f70f3f63"} Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.911756 4881 generic.go:334] "Generic (PLEG): container finished" podID="d5902bc2-d738-4bab-9b99-78b827e3a003" containerID="389b55b4a7ab2a313d39200970c6b44deb752f5196fe1fed6bf750a53a364364" exitCode=1 Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.911864 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-bd877888f-jfg9m" event={"ID":"d5902bc2-d738-4bab-9b99-78b827e3a003","Type":"ContainerDied","Data":"389b55b4a7ab2a313d39200970c6b44deb752f5196fe1fed6bf750a53a364364"} Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.911904 4881 scope.go:117] "RemoveContainer" containerID="f5dd2c135a68c085a36bb2ef21bc30803ab55188615f2e753a0a36f3275d6af8" Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.912771 4881 scope.go:117] "RemoveContainer" containerID="389b55b4a7ab2a313d39200970c6b44deb752f5196fe1fed6bf750a53a364364" Dec 11 08:39:00 crc kubenswrapper[4881]: E1211 08:39:00.913205 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi 
pod=heat-cfnapi-bd877888f-jfg9m_openstack(d5902bc2-d738-4bab-9b99-78b827e3a003)\"" pod="openstack/heat-cfnapi-bd877888f-jfg9m" podUID="d5902bc2-d738-4bab-9b99-78b827e3a003" Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.939905 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-gs9fw" event={"ID":"7a116558-2db5-4fe1-9f64-888d7cd93f57","Type":"ContainerDied","Data":"6ed69b084e2e31f9eeb1d2b5bd0dfd9c8412d1840fad56f2751aa22cfafa3edb"} Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.939955 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ed69b084e2e31f9eeb1d2b5bd0dfd9c8412d1840fad56f2751aa22cfafa3edb" Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.940023 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-gs9fw" Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.948938 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wz97r\" (UniqueName: \"kubernetes.io/projected/7a116558-2db5-4fe1-9f64-888d7cd93f57-kube-api-access-wz97r\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.948974 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tgkxg\" (UniqueName: \"kubernetes.io/projected/556e7646-0cc0-4686-875f-6738267e467e-kube-api-access-tgkxg\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.969216 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-bjwpq" Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.972898 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-bjwpq" event={"ID":"556e7646-0cc0-4686-875f-6738267e467e","Type":"ContainerDied","Data":"ef5f60aac6b94e2e83c51a1c46d43c2cf51fb69865d2ac2ac89dff153059ef35"} Dec 11 08:39:00 crc kubenswrapper[4881]: I1211 08:39:00.972943 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ef5f60aac6b94e2e83c51a1c46d43c2cf51fb69865d2ac2ac89dff153059ef35" Dec 11 08:39:01 crc kubenswrapper[4881]: I1211 08:39:01.743832 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-lhqk7" Dec 11 08:39:01 crc kubenswrapper[4881]: I1211 08:39:01.867559 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zrr84\" (UniqueName: \"kubernetes.io/projected/0152813f-e688-49b2-88d2-afba5096bd0e-kube-api-access-zrr84\") pod \"0152813f-e688-49b2-88d2-afba5096bd0e\" (UID: \"0152813f-e688-49b2-88d2-afba5096bd0e\") " Dec 11 08:39:01 crc kubenswrapper[4881]: I1211 08:39:01.910750 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0152813f-e688-49b2-88d2-afba5096bd0e-kube-api-access-zrr84" (OuterVolumeSpecName: "kube-api-access-zrr84") pod "0152813f-e688-49b2-88d2-afba5096bd0e" (UID: "0152813f-e688-49b2-88d2-afba5096bd0e"). InnerVolumeSpecName "kube-api-access-zrr84". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:39:01 crc kubenswrapper[4881]: I1211 08:39:01.981923 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zrr84\" (UniqueName: \"kubernetes.io/projected/0152813f-e688-49b2-88d2-afba5096bd0e-kube-api-access-zrr84\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:02 crc kubenswrapper[4881]: I1211 08:39:02.066306 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64b0f79e-085c-4fa8-938d-888f5cea99e3","Type":"ContainerStarted","Data":"8c7c5ba3dbe3ee75026dc877aee6c3b47324b2e00861873671734a23ac3d1e01"} Dec 11 08:39:02 crc kubenswrapper[4881]: I1211 08:39:02.088698 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"03bd1873-0976-4da1-a4f0-4bc1ab183cda","Type":"ContainerStarted","Data":"c6f3032e8fed7f37884fe1e90b51b5916c5e58ccba079a405333d76592f55cf9"} Dec 11 08:39:02 crc kubenswrapper[4881]: I1211 08:39:02.101248 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-lhqk7" event={"ID":"0152813f-e688-49b2-88d2-afba5096bd0e","Type":"ContainerDied","Data":"b3e71eb02cfb0d4c62059d5327fa1b467f8bbe7b770bff4b561d5ed641709cfc"} Dec 11 08:39:02 crc kubenswrapper[4881]: I1211 08:39:02.101294 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3e71eb02cfb0d4c62059d5327fa1b467f8bbe7b770bff4b561d5ed641709cfc" Dec 11 08:39:02 crc kubenswrapper[4881]: I1211 08:39:02.101385 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-lhqk7" Dec 11 08:39:02 crc kubenswrapper[4881]: I1211 08:39:02.144604 4881 scope.go:117] "RemoveContainer" containerID="389b55b4a7ab2a313d39200970c6b44deb752f5196fe1fed6bf750a53a364364" Dec 11 08:39:02 crc kubenswrapper[4881]: E1211 08:39:02.144878 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-bd877888f-jfg9m_openstack(d5902bc2-d738-4bab-9b99-78b827e3a003)\"" pod="openstack/heat-cfnapi-bd877888f-jfg9m" podUID="d5902bc2-d738-4bab-9b99-78b827e3a003" Dec 11 08:39:02 crc kubenswrapper[4881]: I1211 08:39:02.164127 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.164103163 podStartE2EDuration="7.164103163s" podCreationTimestamp="2025-12-11 08:38:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:39:02.137011471 +0000 UTC m=+1390.514380178" watchObservedRunningTime="2025-12-11 08:39:02.164103163 +0000 UTC m=+1390.541471861" Dec 11 08:39:02 crc kubenswrapper[4881]: I1211 08:39:02.184055 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"98c33c2d-b3e5-450d-8c52-544acac89c74","Type":"ContainerStarted","Data":"d8a10910b9ce85df4d044ed737d1816e9b7e86dda657d73997e4ec7a4400f295"} Dec 11 08:39:02 crc kubenswrapper[4881]: I1211 08:39:02.184379 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Dec 11 08:39:02 crc kubenswrapper[4881]: I1211 08:39:02.191037 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"e9aa88e0-71a6-40a0-92ec-88084b425df9","Type":"ContainerStarted","Data":"78a92b85b8faf958ccb7b3e66c1e3b6ab1b6a63948f4ef2bd5f20b721b240a66"} Dec 11 08:39:02 crc kubenswrapper[4881]: I1211 08:39:02.212394 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=17.212310169 podStartE2EDuration="17.212310169s" podCreationTimestamp="2025-12-11 08:38:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:39:02.211866098 +0000 UTC m=+1390.589234795" watchObservedRunningTime="2025-12-11 08:39:02.212310169 +0000 UTC m=+1390.589678866" Dec 11 08:39:02 crc kubenswrapper[4881]: I1211 08:39:02.227256 4881 generic.go:334] "Generic (PLEG): container finished" podID="7eca07f6-40f2-48b2-90fe-b5b9b332707f" containerID="9cc1352026398808b6efd0dc4c6bf8fe21a17530136178d25e71f1cc6a8545f7" exitCode=1 Dec 11 08:39:02 crc kubenswrapper[4881]: I1211 08:39:02.228400 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7b446c9cf4-8fv29" event={"ID":"7eca07f6-40f2-48b2-90fe-b5b9b332707f","Type":"ContainerDied","Data":"9cc1352026398808b6efd0dc4c6bf8fe21a17530136178d25e71f1cc6a8545f7"} Dec 11 08:39:02 crc kubenswrapper[4881]: I1211 08:39:02.228525 4881 scope.go:117] "RemoveContainer" containerID="5eb2d57a6beb3080b599766ec050979d3787cb1957f59bbbfd2cbe695e44ed38" Dec 11 08:39:02 crc kubenswrapper[4881]: I1211 08:39:02.228789 4881 scope.go:117] "RemoveContainer" containerID="9cc1352026398808b6efd0dc4c6bf8fe21a17530136178d25e71f1cc6a8545f7" Dec 11 08:39:02 crc kubenswrapper[4881]: E1211 08:39:02.229093 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-7b446c9cf4-8fv29_openstack(7eca07f6-40f2-48b2-90fe-b5b9b332707f)\"" pod="openstack/heat-api-7b446c9cf4-8fv29" podUID="7eca07f6-40f2-48b2-90fe-b5b9b332707f" Dec 11 08:39:03 crc kubenswrapper[4881]: I1211 08:39:03.271238 4881 scope.go:117] "RemoveContainer" containerID="9cc1352026398808b6efd0dc4c6bf8fe21a17530136178d25e71f1cc6a8545f7" Dec 11 08:39:03 crc kubenswrapper[4881]: E1211 08:39:03.272376 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-7b446c9cf4-8fv29_openstack(7eca07f6-40f2-48b2-90fe-b5b9b332707f)\"" pod="openstack/heat-api-7b446c9cf4-8fv29" podUID="7eca07f6-40f2-48b2-90fe-b5b9b332707f" Dec 11 08:39:03 crc kubenswrapper[4881]: I1211 08:39:03.279382 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64b0f79e-085c-4fa8-938d-888f5cea99e3","Type":"ContainerStarted","Data":"21134e6d499c3bbdb493e9798006fa4832e97d0edcc3ee19302462fc9b5d85af"} Dec 11 08:39:03 crc kubenswrapper[4881]: I1211 08:39:03.282298 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4cde142d-aa3a-4c3b-9e63-efcbce032089","Type":"ContainerStarted","Data":"0e2fb735ecf2f664f0a9a37f5f61fc1b19ff460bda6e6090f91e3352f1475a13"} Dec 11 08:39:03 crc kubenswrapper[4881]: I1211 08:39:03.304873 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"e9aa88e0-71a6-40a0-92ec-88084b425df9","Type":"ContainerStarted","Data":"be793b9d1f771382335005f54b5b0026392617d63e2a522be92921ca57708615"} Dec 11 08:39:03 crc kubenswrapper[4881]: I1211 08:39:03.342547 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=8.342519599 podStartE2EDuration="8.342519599s" podCreationTimestamp="2025-12-11 08:38:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:39:03.327788667 +0000 UTC m=+1391.705157374" watchObservedRunningTime="2025-12-11 08:39:03.342519599 +0000 UTC m=+1391.719888296" Dec 11 08:39:03 crc kubenswrapper[4881]: I1211 08:39:03.868060 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:39:03 crc kubenswrapper[4881]: I1211 08:39:03.868937 4881 scope.go:117] "RemoveContainer" containerID="389b55b4a7ab2a313d39200970c6b44deb752f5196fe1fed6bf750a53a364364" Dec 11 08:39:03 crc kubenswrapper[4881]: E1211 08:39:03.869370 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-bd877888f-jfg9m_openstack(d5902bc2-d738-4bab-9b99-78b827e3a003)\"" pod="openstack/heat-cfnapi-bd877888f-jfg9m" podUID="d5902bc2-d738-4bab-9b99-78b827e3a003" Dec 11 08:39:03 crc kubenswrapper[4881]: I1211 08:39:03.991974 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:39:03 crc kubenswrapper[4881]: I1211 08:39:03.992259 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:39:04 crc kubenswrapper[4881]: I1211 08:39:04.423026 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4cde142d-aa3a-4c3b-9e63-efcbce032089","Type":"ContainerStarted","Data":"f247d908afcf1ea37f350ea7cc90dcb044d501e82bc3d2c03a78d5f1dfa31f0e"} Dec 11 08:39:04 crc kubenswrapper[4881]: I1211 08:39:04.424093 4881 scope.go:117] "RemoveContainer" containerID="9cc1352026398808b6efd0dc4c6bf8fe21a17530136178d25e71f1cc6a8545f7" Dec 11 08:39:04 crc kubenswrapper[4881]: E1211 08:39:04.424484 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-7b446c9cf4-8fv29_openstack(7eca07f6-40f2-48b2-90fe-b5b9b332707f)\"" pod="openstack/heat-api-7b446c9cf4-8fv29" podUID="7eca07f6-40f2-48b2-90fe-b5b9b332707f" Dec 11 08:39:04 crc kubenswrapper[4881]: I1211 08:39:04.494874 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.494857127 podStartE2EDuration="6.494857127s" podCreationTimestamp="2025-12-11 08:38:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:39:04.49380553 +0000 UTC m=+1392.871174237" watchObservedRunningTime="2025-12-11 08:39:04.494857127 +0000 UTC m=+1392.872225824" Dec 11 08:39:05 crc kubenswrapper[4881]: I1211 08:39:05.338801 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:39:05 crc kubenswrapper[4881]: I1211 08:39:05.436001 4881 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64b0f79e-085c-4fa8-938d-888f5cea99e3","Type":"ContainerStarted","Data":"d623c7b71494d1b3ce18ffc1d62ed9c941819533667f68e902b48c268e4e842f"} Dec 11 08:39:05 crc kubenswrapper[4881]: I1211 08:39:05.452900 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 11 08:39:05 crc kubenswrapper[4881]: I1211 08:39:05.485582 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=4.097617612 podStartE2EDuration="11.485564628s" podCreationTimestamp="2025-12-11 08:38:54 +0000 UTC" firstStartedPulling="2025-12-11 08:38:56.750978705 +0000 UTC m=+1385.128347402" lastFinishedPulling="2025-12-11 08:39:04.138925721 +0000 UTC m=+1392.516294418" observedRunningTime="2025-12-11 08:39:05.476660564 +0000 UTC m=+1393.854029281" watchObservedRunningTime="2025-12-11 08:39:05.485564628 +0000 UTC m=+1393.862933325" Dec 11 08:39:06 crc kubenswrapper[4881]: I1211 08:39:06.445969 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerName="ceilometer-central-agent" containerID="cri-o://3cb6b7be631f37f0178f2d8f98e6344a0b84c9d7af40f878476956b2833efae4" gracePeriod=30 Dec 11 08:39:06 crc kubenswrapper[4881]: I1211 08:39:06.446026 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerName="proxy-httpd" containerID="cri-o://d623c7b71494d1b3ce18ffc1d62ed9c941819533667f68e902b48c268e4e842f" gracePeriod=30 Dec 11 08:39:06 crc kubenswrapper[4881]: I1211 08:39:06.446058 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerName="sg-core" containerID="cri-o://21134e6d499c3bbdb493e9798006fa4832e97d0edcc3ee19302462fc9b5d85af" gracePeriod=30 Dec 11 08:39:06 crc kubenswrapper[4881]: I1211 08:39:06.446104 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerName="ceilometer-notification-agent" containerID="cri-o://8c7c5ba3dbe3ee75026dc877aee6c3b47324b2e00861873671734a23ac3d1e01" gracePeriod=30 Dec 11 08:39:06 crc kubenswrapper[4881]: I1211 08:39:06.536359 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 11 08:39:06 crc kubenswrapper[4881]: I1211 08:39:06.536803 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 11 08:39:06 crc kubenswrapper[4881]: I1211 08:39:06.606310 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 11 08:39:06 crc kubenswrapper[4881]: I1211 08:39:06.647909 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 11 08:39:07 crc kubenswrapper[4881]: I1211 08:39:07.147096 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Dec 11 08:39:07 crc kubenswrapper[4881]: I1211 08:39:07.348500 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Dec 11 08:39:07 crc kubenswrapper[4881]: I1211 08:39:07.492617 4881 
generic.go:334] "Generic (PLEG): container finished" podID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerID="21134e6d499c3bbdb493e9798006fa4832e97d0edcc3ee19302462fc9b5d85af" exitCode=2 Dec 11 08:39:07 crc kubenswrapper[4881]: I1211 08:39:07.493800 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64b0f79e-085c-4fa8-938d-888f5cea99e3","Type":"ContainerDied","Data":"21134e6d499c3bbdb493e9798006fa4832e97d0edcc3ee19302462fc9b5d85af"} Dec 11 08:39:07 crc kubenswrapper[4881]: I1211 08:39:07.493942 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 11 08:39:07 crc kubenswrapper[4881]: I1211 08:39:07.494004 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.540281 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf","Type":"ContainerStarted","Data":"0d5ccc57bea3331f6ac00b6bf11ca2e9c279ba0aea152870757fb3d9b7868a9c"} Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.553829 4881 generic.go:334] "Generic (PLEG): container finished" podID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerID="d623c7b71494d1b3ce18ffc1d62ed9c941819533667f68e902b48c268e4e842f" exitCode=0 Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.553859 4881 generic.go:334] "Generic (PLEG): container finished" podID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerID="8c7c5ba3dbe3ee75026dc877aee6c3b47324b2e00861873671734a23ac3d1e01" exitCode=0 Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.553867 4881 generic.go:334] "Generic (PLEG): container finished" podID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerID="3cb6b7be631f37f0178f2d8f98e6344a0b84c9d7af40f878476956b2833efae4" exitCode=0 Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.554494 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64b0f79e-085c-4fa8-938d-888f5cea99e3","Type":"ContainerDied","Data":"d623c7b71494d1b3ce18ffc1d62ed9c941819533667f68e902b48c268e4e842f"} Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.554551 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64b0f79e-085c-4fa8-938d-888f5cea99e3","Type":"ContainerDied","Data":"8c7c5ba3dbe3ee75026dc877aee6c3b47324b2e00861873671734a23ac3d1e01"} Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.554565 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64b0f79e-085c-4fa8-938d-888f5cea99e3","Type":"ContainerDied","Data":"3cb6b7be631f37f0178f2d8f98e6344a0b84c9d7af40f878476956b2833efae4"} Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.618096 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.932679276 podStartE2EDuration="40.618071229s" podCreationTimestamp="2025-12-11 08:38:28 +0000 UTC" firstStartedPulling="2025-12-11 08:38:29.10208384 +0000 UTC m=+1357.479452537" lastFinishedPulling="2025-12-11 08:39:07.787475793 +0000 UTC m=+1396.164844490" observedRunningTime="2025-12-11 08:39:08.58088657 +0000 UTC m=+1396.958255277" watchObservedRunningTime="2025-12-11 08:39:08.618071229 +0000 UTC m=+1396.995439926" Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.684502 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.685943 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-579694df8d-vpn5m" Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.736964 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-b5858866f-srtbn" Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.752933 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64b0f79e-085c-4fa8-938d-888f5cea99e3-run-httpd\") pod \"64b0f79e-085c-4fa8-938d-888f5cea99e3\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.753062 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-config-data\") pod \"64b0f79e-085c-4fa8-938d-888f5cea99e3\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.753130 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64b0f79e-085c-4fa8-938d-888f5cea99e3-log-httpd\") pod \"64b0f79e-085c-4fa8-938d-888f5cea99e3\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.753166 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wg5jb\" (UniqueName: \"kubernetes.io/projected/64b0f79e-085c-4fa8-938d-888f5cea99e3-kube-api-access-wg5jb\") pod \"64b0f79e-085c-4fa8-938d-888f5cea99e3\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.753198 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-scripts\") pod \"64b0f79e-085c-4fa8-938d-888f5cea99e3\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.753313 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64b0f79e-085c-4fa8-938d-888f5cea99e3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "64b0f79e-085c-4fa8-938d-888f5cea99e3" (UID: "64b0f79e-085c-4fa8-938d-888f5cea99e3"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.753399 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-combined-ca-bundle\") pod \"64b0f79e-085c-4fa8-938d-888f5cea99e3\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.753419 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-sg-core-conf-yaml\") pod \"64b0f79e-085c-4fa8-938d-888f5cea99e3\" (UID: \"64b0f79e-085c-4fa8-938d-888f5cea99e3\") " Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.753608 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64b0f79e-085c-4fa8-938d-888f5cea99e3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "64b0f79e-085c-4fa8-938d-888f5cea99e3" (UID: "64b0f79e-085c-4fa8-938d-888f5cea99e3"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.754041 4881 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64b0f79e-085c-4fa8-938d-888f5cea99e3-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.754068 4881 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64b0f79e-085c-4fa8-938d-888f5cea99e3-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.763049 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64b0f79e-085c-4fa8-938d-888f5cea99e3-kube-api-access-wg5jb" (OuterVolumeSpecName: "kube-api-access-wg5jb") pod "64b0f79e-085c-4fa8-938d-888f5cea99e3" (UID: "64b0f79e-085c-4fa8-938d-888f5cea99e3"). InnerVolumeSpecName "kube-api-access-wg5jb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.764084 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-scripts" (OuterVolumeSpecName: "scripts") pod "64b0f79e-085c-4fa8-938d-888f5cea99e3" (UID: "64b0f79e-085c-4fa8-938d-888f5cea99e3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.862671 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wg5jb\" (UniqueName: \"kubernetes.io/projected/64b0f79e-085c-4fa8-938d-888f5cea99e3-kube-api-access-wg5jb\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.862693 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.956539 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "64b0f79e-085c-4fa8-938d-888f5cea99e3" (UID: "64b0f79e-085c-4fa8-938d-888f5cea99e3"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.968281 4881 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:08 crc kubenswrapper[4881]: I1211 08:39:08.997490 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-bd877888f-jfg9m"] Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.082905 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-7b446c9cf4-8fv29"] Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.122299 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "64b0f79e-085c-4fa8-938d-888f5cea99e3" (UID: "64b0f79e-085c-4fa8-938d-888f5cea99e3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.151917 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-config-data" (OuterVolumeSpecName: "config-data") pod "64b0f79e-085c-4fa8-938d-888f5cea99e3" (UID: "64b0f79e-085c-4fa8-938d-888f5cea99e3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.173610 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.173660 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64b0f79e-085c-4fa8-938d-888f5cea99e3-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.283910 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.283955 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.370662 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.401443 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.505635 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.584159 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64b0f79e-085c-4fa8-938d-888f5cea99e3","Type":"ContainerDied","Data":"7bbd26efbd176f2208957fed209f6924b2f9c33702be98e4e95afe963b7e1585"} Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.584210 4881 scope.go:117] "RemoveContainer" containerID="d623c7b71494d1b3ce18ffc1d62ed9c941819533667f68e902b48c268e4e842f" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.584379 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.596025 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-bd877888f-jfg9m" event={"ID":"d5902bc2-d738-4bab-9b99-78b827e3a003","Type":"ContainerDied","Data":"3574c6b82044712c60ab82683216ef5f4ba20d451e77f5bc9f96f405bbe75a74"} Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.596087 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.596155 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-bd877888f-jfg9m" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.596276 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.643523 4881 scope.go:117] "RemoveContainer" containerID="21134e6d499c3bbdb493e9798006fa4832e97d0edcc3ee19302462fc9b5d85af" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.670044 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.677883 4881 scope.go:117] "RemoveContainer" containerID="8c7c5ba3dbe3ee75026dc877aee6c3b47324b2e00861873671734a23ac3d1e01" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.688214 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8d89p\" (UniqueName: \"kubernetes.io/projected/d5902bc2-d738-4bab-9b99-78b827e3a003-kube-api-access-8d89p\") pod \"d5902bc2-d738-4bab-9b99-78b827e3a003\" (UID: \"d5902bc2-d738-4bab-9b99-78b827e3a003\") " Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.688282 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-config-data-custom\") pod \"d5902bc2-d738-4bab-9b99-78b827e3a003\" (UID: \"d5902bc2-d738-4bab-9b99-78b827e3a003\") " Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.688375 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-config-data\") pod \"d5902bc2-d738-4bab-9b99-78b827e3a003\" (UID: \"d5902bc2-d738-4bab-9b99-78b827e3a003\") " Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.688520 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-combined-ca-bundle\") pod \"d5902bc2-d738-4bab-9b99-78b827e3a003\" (UID: \"d5902bc2-d738-4bab-9b99-78b827e3a003\") " Dec 11 08:39:09 crc kubenswrapper[4881]: 
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.701905 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7b446c9cf4-8fv29"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.704725 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d5902bc2-d738-4bab-9b99-78b827e3a003" (UID: "d5902bc2-d738-4bab-9b99-78b827e3a003"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.714633 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.716750 4881 scope.go:117] "RemoveContainer" containerID="3cb6b7be631f37f0178f2d8f98e6344a0b84c9d7af40f878476956b2833efae4"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.724559 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:39:09 crc kubenswrapper[4881]: E1211 08:39:09.725254 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0152813f-e688-49b2-88d2-afba5096bd0e" containerName="mariadb-database-create"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.725277 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="0152813f-e688-49b2-88d2-afba5096bd0e" containerName="mariadb-database-create"
Dec 11 08:39:09 crc kubenswrapper[4881]: E1211 08:39:09.725292 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7eca07f6-40f2-48b2-90fe-b5b9b332707f" containerName="heat-api"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.725300 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="7eca07f6-40f2-48b2-90fe-b5b9b332707f" containerName="heat-api"
Dec 11 08:39:09 crc kubenswrapper[4881]: E1211 08:39:09.725314 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerName="ceilometer-central-agent"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.725322 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerName="ceilometer-central-agent"
Dec 11 08:39:09 crc kubenswrapper[4881]: E1211 08:39:09.725465 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5902bc2-d738-4bab-9b99-78b827e3a003" containerName="heat-cfnapi"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.725477 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5902bc2-d738-4bab-9b99-78b827e3a003" containerName="heat-cfnapi"
Dec 11 08:39:09 crc kubenswrapper[4881]: E1211 08:39:09.725513 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerName="sg-core"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.725523 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerName="sg-core"
Dec 11 08:39:09 crc kubenswrapper[4881]: E1211 08:39:09.725554 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="556e7646-0cc0-4686-875f-6738267e467e" containerName="mariadb-database-create"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.725563 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="556e7646-0cc0-4686-875f-6738267e467e" containerName="mariadb-database-create"
Dec 11 08:39:09 crc kubenswrapper[4881]: E1211 08:39:09.725576 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a116558-2db5-4fe1-9f64-888d7cd93f57" containerName="mariadb-database-create"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.725583 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a116558-2db5-4fe1-9f64-888d7cd93f57" containerName="mariadb-database-create"
Dec 11 08:39:09 crc kubenswrapper[4881]: E1211 08:39:09.725598 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5902bc2-d738-4bab-9b99-78b827e3a003" containerName="heat-cfnapi"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.725607 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5902bc2-d738-4bab-9b99-78b827e3a003" containerName="heat-cfnapi"
Dec 11 08:39:09 crc kubenswrapper[4881]: E1211 08:39:09.725618 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerName="ceilometer-notification-agent"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.725626 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerName="ceilometer-notification-agent"
Dec 11 08:39:09 crc kubenswrapper[4881]: E1211 08:39:09.725639 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerName="proxy-httpd"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.725646 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerName="proxy-httpd"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.725913 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a116558-2db5-4fe1-9f64-888d7cd93f57" containerName="mariadb-database-create"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.725943 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="7eca07f6-40f2-48b2-90fe-b5b9b332707f" containerName="heat-api"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.725955 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5902bc2-d738-4bab-9b99-78b827e3a003" containerName="heat-cfnapi"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.725969 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerName="sg-core"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.725980 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerName="ceilometer-central-agent"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.725991 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="0152813f-e688-49b2-88d2-afba5096bd0e" containerName="mariadb-database-create"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.726003 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerName="proxy-httpd"
Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.726017 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5902bc2-d738-4bab-9b99-78b827e3a003" containerName="heat-cfnapi"
podUID="d5902bc2-d738-4bab-9b99-78b827e3a003" containerName="heat-cfnapi" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.726033 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="64b0f79e-085c-4fa8-938d-888f5cea99e3" containerName="ceilometer-notification-agent" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.726047 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="556e7646-0cc0-4686-875f-6738267e467e" containerName="mariadb-database-create" Dec 11 08:39:09 crc kubenswrapper[4881]: E1211 08:39:09.726389 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7eca07f6-40f2-48b2-90fe-b5b9b332707f" containerName="heat-api" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.726407 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="7eca07f6-40f2-48b2-90fe-b5b9b332707f" containerName="heat-api" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.726726 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="7eca07f6-40f2-48b2-90fe-b5b9b332707f" containerName="heat-api" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.731865 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.736123 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.736382 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.760857 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.761485 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d5902bc2-d738-4bab-9b99-78b827e3a003" (UID: "d5902bc2-d738-4bab-9b99-78b827e3a003"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.790180 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lw5kk\" (UniqueName: \"kubernetes.io/projected/7eca07f6-40f2-48b2-90fe-b5b9b332707f-kube-api-access-lw5kk\") pod \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\" (UID: \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\") " Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.790298 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-config-data\") pod \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\" (UID: \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\") " Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.790408 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-config-data-custom\") pod \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\" (UID: \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\") " Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.790635 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-combined-ca-bundle\") pod \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\" (UID: \"7eca07f6-40f2-48b2-90fe-b5b9b332707f\") " Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.792460 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.792492 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8d89p\" (UniqueName: \"kubernetes.io/projected/d5902bc2-d738-4bab-9b99-78b827e3a003-kube-api-access-8d89p\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.792509 4881 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.794645 4881 scope.go:117] "RemoveContainer" containerID="389b55b4a7ab2a313d39200970c6b44deb752f5196fe1fed6bf750a53a364364" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.827066 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7eca07f6-40f2-48b2-90fe-b5b9b332707f-kube-api-access-lw5kk" (OuterVolumeSpecName: "kube-api-access-lw5kk") pod "7eca07f6-40f2-48b2-90fe-b5b9b332707f" (UID: "7eca07f6-40f2-48b2-90fe-b5b9b332707f"). InnerVolumeSpecName "kube-api-access-lw5kk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.833119 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "7eca07f6-40f2-48b2-90fe-b5b9b332707f" (UID: "7eca07f6-40f2-48b2-90fe-b5b9b332707f"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.894104 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.894172 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15b38329-9fa4-4050-9d6a-ede621f328d3-log-httpd\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.894222 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-config-data\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.894288 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.894353 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-scripts\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.894375 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgjch\" (UniqueName: \"kubernetes.io/projected/15b38329-9fa4-4050-9d6a-ede621f328d3-kube-api-access-jgjch\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.894411 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15b38329-9fa4-4050-9d6a-ede621f328d3-run-httpd\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.894491 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lw5kk\" (UniqueName: \"kubernetes.io/projected/7eca07f6-40f2-48b2-90fe-b5b9b332707f-kube-api-access-lw5kk\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.894503 4881 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.901820 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-config-data" (OuterVolumeSpecName: "config-data") pod "7eca07f6-40f2-48b2-90fe-b5b9b332707f" (UID: "7eca07f6-40f2-48b2-90fe-b5b9b332707f"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.903634 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7eca07f6-40f2-48b2-90fe-b5b9b332707f" (UID: "7eca07f6-40f2-48b2-90fe-b5b9b332707f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.909457 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-config-data" (OuterVolumeSpecName: "config-data") pod "d5902bc2-d738-4bab-9b99-78b827e3a003" (UID: "d5902bc2-d738-4bab-9b99-78b827e3a003"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.996195 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-config-data\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.996908 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.997085 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-scripts\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.997180 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgjch\" (UniqueName: \"kubernetes.io/projected/15b38329-9fa4-4050-9d6a-ede621f328d3-kube-api-access-jgjch\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.997305 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15b38329-9fa4-4050-9d6a-ede621f328d3-run-httpd\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.997475 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.998712 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15b38329-9fa4-4050-9d6a-ede621f328d3-log-httpd\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.998901 4881 reconciler_common.go:293] "Volume detached for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.998991 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7eca07f6-40f2-48b2-90fe-b5b9b332707f-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.999046 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15b38329-9fa4-4050-9d6a-ede621f328d3-run-httpd\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.999062 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5902bc2-d738-4bab-9b99-78b827e3a003-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:09 crc kubenswrapper[4881]: I1211 08:39:09.999421 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15b38329-9fa4-4050-9d6a-ede621f328d3-log-httpd\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:10 crc kubenswrapper[4881]: I1211 08:39:10.018040 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-config-data\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:10 crc kubenswrapper[4881]: I1211 08:39:10.018701 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-scripts\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:10 crc kubenswrapper[4881]: I1211 08:39:10.018981 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:10 crc kubenswrapper[4881]: I1211 08:39:10.031093 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:10 crc kubenswrapper[4881]: I1211 08:39:10.109077 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgjch\" (UniqueName: \"kubernetes.io/projected/15b38329-9fa4-4050-9d6a-ede621f328d3-kube-api-access-jgjch\") pod \"ceilometer-0\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " pod="openstack/ceilometer-0" Dec 11 08:39:10 crc kubenswrapper[4881]: I1211 08:39:10.252326 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-bd877888f-jfg9m"] Dec 11 08:39:10 crc kubenswrapper[4881]: I1211 08:39:10.262197 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-bd877888f-jfg9m"] Dec 11 08:39:10 crc kubenswrapper[4881]: I1211 08:39:10.368369 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:39:10 crc kubenswrapper[4881]: I1211 08:39:10.642971 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7b446c9cf4-8fv29" Dec 11 08:39:10 crc kubenswrapper[4881]: I1211 08:39:10.644049 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7b446c9cf4-8fv29" event={"ID":"7eca07f6-40f2-48b2-90fe-b5b9b332707f","Type":"ContainerDied","Data":"0a8eaf7dff9fe7f1dbe85244174db7474982dc8a3f35561082263e750868419e"} Dec 11 08:39:10 crc kubenswrapper[4881]: I1211 08:39:10.644105 4881 scope.go:117] "RemoveContainer" containerID="9cc1352026398808b6efd0dc4c6bf8fe21a17530136178d25e71f1cc6a8545f7" Dec 11 08:39:10 crc kubenswrapper[4881]: I1211 08:39:10.714572 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-7b446c9cf4-8fv29"] Dec 11 08:39:10 crc kubenswrapper[4881]: I1211 08:39:10.727422 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-7b446c9cf4-8fv29"] Dec 11 08:39:10 crc kubenswrapper[4881]: I1211 08:39:10.925738 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:39:10 crc kubenswrapper[4881]: I1211 08:39:10.993072 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 11 08:39:10 crc kubenswrapper[4881]: I1211 08:39:10.993191 4881 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 11 08:39:11 crc kubenswrapper[4881]: I1211 08:39:11.053810 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64b0f79e-085c-4fa8-938d-888f5cea99e3" path="/var/lib/kubelet/pods/64b0f79e-085c-4fa8-938d-888f5cea99e3/volumes" Dec 11 08:39:11 crc kubenswrapper[4881]: I1211 08:39:11.054982 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7eca07f6-40f2-48b2-90fe-b5b9b332707f" path="/var/lib/kubelet/pods/7eca07f6-40f2-48b2-90fe-b5b9b332707f/volumes" Dec 11 08:39:11 crc kubenswrapper[4881]: I1211 08:39:11.064008 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5902bc2-d738-4bab-9b99-78b827e3a003" path="/var/lib/kubelet/pods/d5902bc2-d738-4bab-9b99-78b827e3a003/volumes" Dec 11 08:39:11 crc kubenswrapper[4881]: I1211 08:39:11.282553 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="98c33c2d-b3e5-450d-8c52-544acac89c74" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.213:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 08:39:11 crc kubenswrapper[4881]: I1211 08:39:11.730605 4881 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 11 08:39:11 crc kubenswrapper[4881]: I1211 08:39:11.730634 4881 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 11 08:39:11 crc kubenswrapper[4881]: I1211 08:39:11.731833 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15b38329-9fa4-4050-9d6a-ede621f328d3","Type":"ContainerStarted","Data":"a2b6c42ed8c1dc29ddab7b307ea617bb348133110913140690f678ec6d91bb27"} Dec 11 08:39:11 crc kubenswrapper[4881]: I1211 08:39:11.860238 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 11 08:39:12 crc kubenswrapper[4881]: I1211 08:39:12.744856 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"15b38329-9fa4-4050-9d6a-ede621f328d3","Type":"ContainerStarted","Data":"60e7233e95eb3ea86f725b4455ff9c94c87306716c8baf259f33e2bf37729abf"} Dec 11 08:39:13 crc kubenswrapper[4881]: I1211 08:39:13.602674 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 11 08:39:13 crc kubenswrapper[4881]: I1211 08:39:13.603235 4881 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 11 08:39:13 crc kubenswrapper[4881]: I1211 08:39:13.606398 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 11 08:39:13 crc kubenswrapper[4881]: I1211 08:39:13.811993 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-d56bd9469-fm4xb" Dec 11 08:39:13 crc kubenswrapper[4881]: I1211 08:39:13.911039 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-557bc7cb47-8hntq"] Dec 11 08:39:13 crc kubenswrapper[4881]: I1211 08:39:13.911232 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-557bc7cb47-8hntq" podUID="41f75170-bc89-4911-a5f3-5456d3512897" containerName="heat-engine" containerID="cri-o://e984e439f24989cdf8f7b0f35994a1bb5660e2301c8cd791b8a8c90727ebe50f" gracePeriod=60 Dec 11 08:39:14 crc kubenswrapper[4881]: I1211 08:39:14.435692 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:39:15 crc kubenswrapper[4881]: E1211 08:39:15.315422 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e984e439f24989cdf8f7b0f35994a1bb5660e2301c8cd791b8a8c90727ebe50f" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 11 08:39:15 crc kubenswrapper[4881]: E1211 08:39:15.317848 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e984e439f24989cdf8f7b0f35994a1bb5660e2301c8cd791b8a8c90727ebe50f" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 11 08:39:15 crc kubenswrapper[4881]: E1211 08:39:15.319299 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e984e439f24989cdf8f7b0f35994a1bb5660e2301c8cd791b8a8c90727ebe50f" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 11 08:39:15 crc kubenswrapper[4881]: E1211 08:39:15.319435 4881 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-557bc7cb47-8hntq" podUID="41f75170-bc89-4911-a5f3-5456d3512897" containerName="heat-engine" Dec 11 08:39:15 crc kubenswrapper[4881]: I1211 08:39:15.416447 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Dec 11 08:39:15 crc kubenswrapper[4881]: I1211 08:39:15.814420 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15b38329-9fa4-4050-9d6a-ede621f328d3","Type":"ContainerStarted","Data":"7acdc684c0ea0f1890b6f885420013c1d97425c01c38ee013ba0e378dcb48d90"} 
Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.276671 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-ea12-account-create-lwkvg"]
Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.278186 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-ea12-account-create-lwkvg"
Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.282902 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.288561 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-ea12-account-create-lwkvg"]
Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.452373 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrjn2\" (UniqueName: \"kubernetes.io/projected/f9014fc6-5e9d-4535-9961-e9cc2ab2f357-kube-api-access-wrjn2\") pod \"nova-api-ea12-account-create-lwkvg\" (UID: \"f9014fc6-5e9d-4535-9961-e9cc2ab2f357\") " pod="openstack/nova-api-ea12-account-create-lwkvg"
Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.458785 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-ebaf-account-create-f5l7l"]
Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.466178 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ebaf-account-create-f5l7l"
Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.468865 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.481963 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-ebaf-account-create-f5l7l"]
Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.554479 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrjn2\" (UniqueName: \"kubernetes.io/projected/f9014fc6-5e9d-4535-9961-e9cc2ab2f357-kube-api-access-wrjn2\") pod \"nova-api-ea12-account-create-lwkvg\" (UID: \"f9014fc6-5e9d-4535-9961-e9cc2ab2f357\") " pod="openstack/nova-api-ea12-account-create-lwkvg"
Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.581211 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrjn2\" (UniqueName: \"kubernetes.io/projected/f9014fc6-5e9d-4535-9961-e9cc2ab2f357-kube-api-access-wrjn2\") pod \"nova-api-ea12-account-create-lwkvg\" (UID: \"f9014fc6-5e9d-4535-9961-e9cc2ab2f357\") " pod="openstack/nova-api-ea12-account-create-lwkvg"
Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.596169 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-ea12-account-create-lwkvg"
Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.656656 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b29mw\" (UniqueName: \"kubernetes.io/projected/07a8ad09-4a57-4bec-bba2-682ce3e405b6-kube-api-access-b29mw\") pod \"nova-cell0-ebaf-account-create-f5l7l\" (UID: \"07a8ad09-4a57-4bec-bba2-682ce3e405b6\") " pod="openstack/nova-cell0-ebaf-account-create-f5l7l"
Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.694275 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-2f0d-account-create-tp8rt"]
Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.696107 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-2f0d-account-create-tp8rt"
Need to start a new one" pod="openstack/nova-cell1-2f0d-account-create-tp8rt" Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.698392 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.760832 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b29mw\" (UniqueName: \"kubernetes.io/projected/07a8ad09-4a57-4bec-bba2-682ce3e405b6-kube-api-access-b29mw\") pod \"nova-cell0-ebaf-account-create-f5l7l\" (UID: \"07a8ad09-4a57-4bec-bba2-682ce3e405b6\") " pod="openstack/nova-cell0-ebaf-account-create-f5l7l" Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.796811 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-2f0d-account-create-tp8rt"] Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.836875 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b29mw\" (UniqueName: \"kubernetes.io/projected/07a8ad09-4a57-4bec-bba2-682ce3e405b6-kube-api-access-b29mw\") pod \"nova-cell0-ebaf-account-create-f5l7l\" (UID: \"07a8ad09-4a57-4bec-bba2-682ce3e405b6\") " pod="openstack/nova-cell0-ebaf-account-create-f5l7l" Dec 11 08:39:16 crc kubenswrapper[4881]: I1211 08:39:16.987079 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqsht\" (UniqueName: \"kubernetes.io/projected/90e20f17-6c3d-4da8-91d5-6a4fcbb8832d-kube-api-access-fqsht\") pod \"nova-cell1-2f0d-account-create-tp8rt\" (UID: \"90e20f17-6c3d-4da8-91d5-6a4fcbb8832d\") " pod="openstack/nova-cell1-2f0d-account-create-tp8rt" Dec 11 08:39:17 crc kubenswrapper[4881]: I1211 08:39:17.090757 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqsht\" (UniqueName: \"kubernetes.io/projected/90e20f17-6c3d-4da8-91d5-6a4fcbb8832d-kube-api-access-fqsht\") pod \"nova-cell1-2f0d-account-create-tp8rt\" (UID: \"90e20f17-6c3d-4da8-91d5-6a4fcbb8832d\") " pod="openstack/nova-cell1-2f0d-account-create-tp8rt" Dec 11 08:39:17 crc kubenswrapper[4881]: I1211 08:39:17.112577 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqsht\" (UniqueName: \"kubernetes.io/projected/90e20f17-6c3d-4da8-91d5-6a4fcbb8832d-kube-api-access-fqsht\") pod \"nova-cell1-2f0d-account-create-tp8rt\" (UID: \"90e20f17-6c3d-4da8-91d5-6a4fcbb8832d\") " pod="openstack/nova-cell1-2f0d-account-create-tp8rt" Dec 11 08:39:17 crc kubenswrapper[4881]: I1211 08:39:17.112893 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ebaf-account-create-f5l7l" Dec 11 08:39:17 crc kubenswrapper[4881]: I1211 08:39:17.352010 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-2f0d-account-create-tp8rt" Dec 11 08:39:17 crc kubenswrapper[4881]: I1211 08:39:17.955293 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15b38329-9fa4-4050-9d6a-ede621f328d3","Type":"ContainerStarted","Data":"02d606f3350fe1ed49418410ffd1561549070f3191c6b94f550b345f2273996e"} Dec 11 08:39:17 crc kubenswrapper[4881]: I1211 08:39:17.990543 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-ebaf-account-create-f5l7l"] Dec 11 08:39:18 crc kubenswrapper[4881]: I1211 08:39:18.006737 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-ea12-account-create-lwkvg"] Dec 11 08:39:18 crc kubenswrapper[4881]: I1211 08:39:18.967671 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-ea12-account-create-lwkvg" event={"ID":"f9014fc6-5e9d-4535-9961-e9cc2ab2f357","Type":"ContainerStarted","Data":"27a9a4ecf30fff34ab58001db8a3c4d373f2c3fd742c056e7b9d600f7abf87a9"} Dec 11 08:39:18 crc kubenswrapper[4881]: I1211 08:39:18.969236 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ebaf-account-create-f5l7l" event={"ID":"07a8ad09-4a57-4bec-bba2-682ce3e405b6","Type":"ContainerStarted","Data":"1229a8590ed2c4c8b317b27e47ae0d8e0c4cf3cab32d237d3920fc00f524de30"} Dec 11 08:39:19 crc kubenswrapper[4881]: I1211 08:39:19.130907 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-2f0d-account-create-tp8rt"] Dec 11 08:39:19 crc kubenswrapper[4881]: I1211 08:39:19.983073 4881 generic.go:334] "Generic (PLEG): container finished" podID="f9014fc6-5e9d-4535-9961-e9cc2ab2f357" containerID="d1c391fe68303db94bb518644ce9868d9cdae96e140449fce263f67a00d21ef6" exitCode=0 Dec 11 08:39:19 crc kubenswrapper[4881]: I1211 08:39:19.983137 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-ea12-account-create-lwkvg" event={"ID":"f9014fc6-5e9d-4535-9961-e9cc2ab2f357","Type":"ContainerDied","Data":"d1c391fe68303db94bb518644ce9868d9cdae96e140449fce263f67a00d21ef6"} Dec 11 08:39:19 crc kubenswrapper[4881]: I1211 08:39:19.986206 4881 generic.go:334] "Generic (PLEG): container finished" podID="07a8ad09-4a57-4bec-bba2-682ce3e405b6" containerID="7b423d2507e65b7394dd7ac25996fb90ee191ddf5e87efc3a144549a1ecc7d2a" exitCode=0 Dec 11 08:39:19 crc kubenswrapper[4881]: I1211 08:39:19.986273 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ebaf-account-create-f5l7l" event={"ID":"07a8ad09-4a57-4bec-bba2-682ce3e405b6","Type":"ContainerDied","Data":"7b423d2507e65b7394dd7ac25996fb90ee191ddf5e87efc3a144549a1ecc7d2a"} Dec 11 08:39:19 crc kubenswrapper[4881]: I1211 08:39:19.989191 4881 generic.go:334] "Generic (PLEG): container finished" podID="90e20f17-6c3d-4da8-91d5-6a4fcbb8832d" containerID="dad2be3c2ee493ae66d709cd28efe8b57e81164c9f30af2f89c5213a7a48c548" exitCode=0 Dec 11 08:39:19 crc kubenswrapper[4881]: I1211 08:39:19.989242 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2f0d-account-create-tp8rt" event={"ID":"90e20f17-6c3d-4da8-91d5-6a4fcbb8832d","Type":"ContainerDied","Data":"dad2be3c2ee493ae66d709cd28efe8b57e81164c9f30af2f89c5213a7a48c548"} Dec 11 08:39:19 crc kubenswrapper[4881]: I1211 08:39:19.989263 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2f0d-account-create-tp8rt" 
event={"ID":"90e20f17-6c3d-4da8-91d5-6a4fcbb8832d","Type":"ContainerStarted","Data":"16388f26eca11390caafcafae90e1d893a5962c6a3acd0dba327083cc63f7910"} Dec 11 08:39:21 crc kubenswrapper[4881]: I1211 08:39:21.001701 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15b38329-9fa4-4050-9d6a-ede621f328d3","Type":"ContainerStarted","Data":"ea1b3e47edaf62f5639baf0e889363b705618499d6fc9552b6405907b6e14b51"} Dec 11 08:39:21 crc kubenswrapper[4881]: I1211 08:39:21.001866 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerName="ceilometer-central-agent" containerID="cri-o://60e7233e95eb3ea86f725b4455ff9c94c87306716c8baf259f33e2bf37729abf" gracePeriod=30 Dec 11 08:39:21 crc kubenswrapper[4881]: I1211 08:39:21.001956 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerName="sg-core" containerID="cri-o://02d606f3350fe1ed49418410ffd1561549070f3191c6b94f550b345f2273996e" gracePeriod=30 Dec 11 08:39:21 crc kubenswrapper[4881]: I1211 08:39:21.001974 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerName="ceilometer-notification-agent" containerID="cri-o://7acdc684c0ea0f1890b6f885420013c1d97425c01c38ee013ba0e378dcb48d90" gracePeriod=30 Dec 11 08:39:21 crc kubenswrapper[4881]: I1211 08:39:21.002130 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerName="proxy-httpd" containerID="cri-o://ea1b3e47edaf62f5639baf0e889363b705618499d6fc9552b6405907b6e14b51" gracePeriod=30 Dec 11 08:39:21 crc kubenswrapper[4881]: I1211 08:39:21.531823 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-2f0d-account-create-tp8rt" Dec 11 08:39:21 crc kubenswrapper[4881]: I1211 08:39:21.545692 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsht\" (UniqueName: \"kubernetes.io/projected/90e20f17-6c3d-4da8-91d5-6a4fcbb8832d-kube-api-access-fqsht\") pod \"90e20f17-6c3d-4da8-91d5-6a4fcbb8832d\" (UID: \"90e20f17-6c3d-4da8-91d5-6a4fcbb8832d\") " Dec 11 08:39:21 crc kubenswrapper[4881]: I1211 08:39:21.562532 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.796105363 podStartE2EDuration="12.56250692s" podCreationTimestamp="2025-12-11 08:39:09 +0000 UTC" firstStartedPulling="2025-12-11 08:39:10.928291994 +0000 UTC m=+1399.305660691" lastFinishedPulling="2025-12-11 08:39:19.694693551 +0000 UTC m=+1408.072062248" observedRunningTime="2025-12-11 08:39:21.033173492 +0000 UTC m=+1409.410542189" watchObservedRunningTime="2025-12-11 08:39:21.56250692 +0000 UTC m=+1409.939875637" Dec 11 08:39:21 crc kubenswrapper[4881]: I1211 08:39:21.577563 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90e20f17-6c3d-4da8-91d5-6a4fcbb8832d-kube-api-access-fqsht" (OuterVolumeSpecName: "kube-api-access-fqsht") pod "90e20f17-6c3d-4da8-91d5-6a4fcbb8832d" (UID: "90e20f17-6c3d-4da8-91d5-6a4fcbb8832d"). InnerVolumeSpecName "kube-api-access-fqsht". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:39:21 crc kubenswrapper[4881]: I1211 08:39:21.655100 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsht\" (UniqueName: \"kubernetes.io/projected/90e20f17-6c3d-4da8-91d5-6a4fcbb8832d-kube-api-access-fqsht\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:21 crc kubenswrapper[4881]: I1211 08:39:21.814495 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-ea12-account-create-lwkvg" Dec 11 08:39:21 crc kubenswrapper[4881]: I1211 08:39:21.859661 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrjn2\" (UniqueName: \"kubernetes.io/projected/f9014fc6-5e9d-4535-9961-e9cc2ab2f357-kube-api-access-wrjn2\") pod \"f9014fc6-5e9d-4535-9961-e9cc2ab2f357\" (UID: \"f9014fc6-5e9d-4535-9961-e9cc2ab2f357\") " Dec 11 08:39:21 crc kubenswrapper[4881]: I1211 08:39:21.867511 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9014fc6-5e9d-4535-9961-e9cc2ab2f357-kube-api-access-wrjn2" (OuterVolumeSpecName: "kube-api-access-wrjn2") pod "f9014fc6-5e9d-4535-9961-e9cc2ab2f357" (UID: "f9014fc6-5e9d-4535-9961-e9cc2ab2f357"). InnerVolumeSpecName "kube-api-access-wrjn2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:39:21 crc kubenswrapper[4881]: I1211 08:39:21.934973 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ebaf-account-create-f5l7l" Dec 11 08:39:21 crc kubenswrapper[4881]: I1211 08:39:21.961651 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b29mw\" (UniqueName: \"kubernetes.io/projected/07a8ad09-4a57-4bec-bba2-682ce3e405b6-kube-api-access-b29mw\") pod \"07a8ad09-4a57-4bec-bba2-682ce3e405b6\" (UID: \"07a8ad09-4a57-4bec-bba2-682ce3e405b6\") " Dec 11 08:39:21 crc kubenswrapper[4881]: I1211 08:39:21.962255 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrjn2\" (UniqueName: \"kubernetes.io/projected/f9014fc6-5e9d-4535-9961-e9cc2ab2f357-kube-api-access-wrjn2\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:21 crc kubenswrapper[4881]: I1211 08:39:21.969522 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07a8ad09-4a57-4bec-bba2-682ce3e405b6-kube-api-access-b29mw" (OuterVolumeSpecName: "kube-api-access-b29mw") pod "07a8ad09-4a57-4bec-bba2-682ce3e405b6" (UID: "07a8ad09-4a57-4bec-bba2-682ce3e405b6"). InnerVolumeSpecName "kube-api-access-b29mw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.021163 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2f0d-account-create-tp8rt" event={"ID":"90e20f17-6c3d-4da8-91d5-6a4fcbb8832d","Type":"ContainerDied","Data":"16388f26eca11390caafcafae90e1d893a5962c6a3acd0dba327083cc63f7910"} Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.021204 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="16388f26eca11390caafcafae90e1d893a5962c6a3acd0dba327083cc63f7910" Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.021272 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-2f0d-account-create-tp8rt" Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.026888 4881 generic.go:334] "Generic (PLEG): container finished" podID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerID="ea1b3e47edaf62f5639baf0e889363b705618499d6fc9552b6405907b6e14b51" exitCode=0 Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.026917 4881 generic.go:334] "Generic (PLEG): container finished" podID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerID="02d606f3350fe1ed49418410ffd1561549070f3191c6b94f550b345f2273996e" exitCode=2 Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.026926 4881 generic.go:334] "Generic (PLEG): container finished" podID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerID="7acdc684c0ea0f1890b6f885420013c1d97425c01c38ee013ba0e378dcb48d90" exitCode=0 Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.026932 4881 generic.go:334] "Generic (PLEG): container finished" podID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerID="60e7233e95eb3ea86f725b4455ff9c94c87306716c8baf259f33e2bf37729abf" exitCode=0 Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.026978 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15b38329-9fa4-4050-9d6a-ede621f328d3","Type":"ContainerDied","Data":"ea1b3e47edaf62f5639baf0e889363b705618499d6fc9552b6405907b6e14b51"} Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.027006 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15b38329-9fa4-4050-9d6a-ede621f328d3","Type":"ContainerDied","Data":"02d606f3350fe1ed49418410ffd1561549070f3191c6b94f550b345f2273996e"} Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.027018 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15b38329-9fa4-4050-9d6a-ede621f328d3","Type":"ContainerDied","Data":"7acdc684c0ea0f1890b6f885420013c1d97425c01c38ee013ba0e378dcb48d90"} Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.027026 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15b38329-9fa4-4050-9d6a-ede621f328d3","Type":"ContainerDied","Data":"60e7233e95eb3ea86f725b4455ff9c94c87306716c8baf259f33e2bf37729abf"} Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.036974 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-ea12-account-create-lwkvg" event={"ID":"f9014fc6-5e9d-4535-9961-e9cc2ab2f357","Type":"ContainerDied","Data":"27a9a4ecf30fff34ab58001db8a3c4d373f2c3fd742c056e7b9d600f7abf87a9"} Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.037020 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27a9a4ecf30fff34ab58001db8a3c4d373f2c3fd742c056e7b9d600f7abf87a9" Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.037076 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-ea12-account-create-lwkvg" Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.051526 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ebaf-account-create-f5l7l" event={"ID":"07a8ad09-4a57-4bec-bba2-682ce3e405b6","Type":"ContainerDied","Data":"1229a8590ed2c4c8b317b27e47ae0d8e0c4cf3cab32d237d3920fc00f524de30"} Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.051594 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1229a8590ed2c4c8b317b27e47ae0d8e0c4cf3cab32d237d3920fc00f524de30" Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.051793 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ebaf-account-create-f5l7l" Dec 11 08:39:22 crc kubenswrapper[4881]: I1211 08:39:22.064639 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b29mw\" (UniqueName: \"kubernetes.io/projected/07a8ad09-4a57-4bec-bba2-682ce3e405b6-kube-api-access-b29mw\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.183648 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.290580 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-sg-core-conf-yaml\") pod \"15b38329-9fa4-4050-9d6a-ede621f328d3\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.290717 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-combined-ca-bundle\") pod \"15b38329-9fa4-4050-9d6a-ede621f328d3\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.290754 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgjch\" (UniqueName: \"kubernetes.io/projected/15b38329-9fa4-4050-9d6a-ede621f328d3-kube-api-access-jgjch\") pod \"15b38329-9fa4-4050-9d6a-ede621f328d3\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.290791 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-config-data\") pod \"15b38329-9fa4-4050-9d6a-ede621f328d3\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.290832 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-scripts\") pod \"15b38329-9fa4-4050-9d6a-ede621f328d3\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.290860 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15b38329-9fa4-4050-9d6a-ede621f328d3-log-httpd\") pod \"15b38329-9fa4-4050-9d6a-ede621f328d3\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.290910 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/15b38329-9fa4-4050-9d6a-ede621f328d3-run-httpd\") pod \"15b38329-9fa4-4050-9d6a-ede621f328d3\" (UID: \"15b38329-9fa4-4050-9d6a-ede621f328d3\") " Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.291408 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15b38329-9fa4-4050-9d6a-ede621f328d3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "15b38329-9fa4-4050-9d6a-ede621f328d3" (UID: "15b38329-9fa4-4050-9d6a-ede621f328d3"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.291630 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15b38329-9fa4-4050-9d6a-ede621f328d3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "15b38329-9fa4-4050-9d6a-ede621f328d3" (UID: "15b38329-9fa4-4050-9d6a-ede621f328d3"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.296999 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-scripts" (OuterVolumeSpecName: "scripts") pod "15b38329-9fa4-4050-9d6a-ede621f328d3" (UID: "15b38329-9fa4-4050-9d6a-ede621f328d3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.299973 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15b38329-9fa4-4050-9d6a-ede621f328d3-kube-api-access-jgjch" (OuterVolumeSpecName: "kube-api-access-jgjch") pod "15b38329-9fa4-4050-9d6a-ede621f328d3" (UID: "15b38329-9fa4-4050-9d6a-ede621f328d3"). InnerVolumeSpecName "kube-api-access-jgjch". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.349789 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "15b38329-9fa4-4050-9d6a-ede621f328d3" (UID: "15b38329-9fa4-4050-9d6a-ede621f328d3"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.397763 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.397792 4881 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15b38329-9fa4-4050-9d6a-ede621f328d3-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.397802 4881 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15b38329-9fa4-4050-9d6a-ede621f328d3-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.397815 4881 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.397951 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgjch\" (UniqueName: \"kubernetes.io/projected/15b38329-9fa4-4050-9d6a-ede621f328d3-kube-api-access-jgjch\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.442806 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "15b38329-9fa4-4050-9d6a-ede621f328d3" (UID: "15b38329-9fa4-4050-9d6a-ede621f328d3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.454791 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-config-data" (OuterVolumeSpecName: "config-data") pod "15b38329-9fa4-4050-9d6a-ede621f328d3" (UID: "15b38329-9fa4-4050-9d6a-ede621f328d3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.500615 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:23 crc kubenswrapper[4881]: I1211 08:39:23.500647 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15b38329-9fa4-4050-9d6a-ede621f328d3-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.075547 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15b38329-9fa4-4050-9d6a-ede621f328d3","Type":"ContainerDied","Data":"a2b6c42ed8c1dc29ddab7b307ea617bb348133110913140690f678ec6d91bb27"} Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.075894 4881 scope.go:117] "RemoveContainer" containerID="ea1b3e47edaf62f5639baf0e889363b705618499d6fc9552b6405907b6e14b51" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.075824 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.113237 4881 scope.go:117] "RemoveContainer" containerID="02d606f3350fe1ed49418410ffd1561549070f3191c6b94f550b345f2273996e" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.122381 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.151796 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.156895 4881 scope.go:117] "RemoveContainer" containerID="7acdc684c0ea0f1890b6f885420013c1d97425c01c38ee013ba0e378dcb48d90" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.171884 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:39:24 crc kubenswrapper[4881]: E1211 08:39:24.172490 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerName="ceilometer-notification-agent" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.172510 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerName="ceilometer-notification-agent" Dec 11 08:39:24 crc kubenswrapper[4881]: E1211 08:39:24.172521 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerName="sg-core" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.172527 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerName="sg-core" Dec 11 08:39:24 crc kubenswrapper[4881]: E1211 08:39:24.172541 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90e20f17-6c3d-4da8-91d5-6a4fcbb8832d" containerName="mariadb-account-create" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.172551 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="90e20f17-6c3d-4da8-91d5-6a4fcbb8832d" containerName="mariadb-account-create" Dec 11 08:39:24 crc kubenswrapper[4881]: E1211 08:39:24.172582 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerName="ceilometer-central-agent" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.172595 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerName="ceilometer-central-agent" Dec 11 08:39:24 crc kubenswrapper[4881]: E1211 08:39:24.172609 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07a8ad09-4a57-4bec-bba2-682ce3e405b6" containerName="mariadb-account-create" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.172615 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="07a8ad09-4a57-4bec-bba2-682ce3e405b6" containerName="mariadb-account-create" Dec 11 08:39:24 crc kubenswrapper[4881]: E1211 08:39:24.172629 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9014fc6-5e9d-4535-9961-e9cc2ab2f357" containerName="mariadb-account-create" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.172635 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9014fc6-5e9d-4535-9961-e9cc2ab2f357" containerName="mariadb-account-create" Dec 11 08:39:24 crc kubenswrapper[4881]: E1211 08:39:24.172650 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerName="proxy-httpd" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 
08:39:24.172656 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerName="proxy-httpd" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.172861 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerName="ceilometer-notification-agent" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.172876 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="90e20f17-6c3d-4da8-91d5-6a4fcbb8832d" containerName="mariadb-account-create" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.172894 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerName="ceilometer-central-agent" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.172902 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerName="sg-core" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.172915 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="07a8ad09-4a57-4bec-bba2-682ce3e405b6" containerName="mariadb-account-create" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.172926 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9014fc6-5e9d-4535-9961-e9cc2ab2f357" containerName="mariadb-account-create" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.172938 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="15b38329-9fa4-4050-9d6a-ede621f328d3" containerName="proxy-httpd" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.175059 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.178001 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.178198 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.185197 4881 scope.go:117] "RemoveContainer" containerID="60e7233e95eb3ea86f725b4455ff9c94c87306716c8baf259f33e2bf37729abf" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.196931 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.319189 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd2a3a77-7cef-426e-a49c-939df192076b-run-httpd\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.319401 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-scripts\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.319469 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc 
kubenswrapper[4881]: I1211 08:39:24.319660 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-config-data\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.319726 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd2a3a77-7cef-426e-a49c-939df192076b-log-httpd\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.319819 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.319920 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ln62k\" (UniqueName: \"kubernetes.io/projected/fd2a3a77-7cef-426e-a49c-939df192076b-kube-api-access-ln62k\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.422106 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-scripts\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.422509 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.422604 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-config-data\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.422636 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd2a3a77-7cef-426e-a49c-939df192076b-log-httpd\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.422679 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.422730 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ln62k\" (UniqueName: \"kubernetes.io/projected/fd2a3a77-7cef-426e-a49c-939df192076b-kube-api-access-ln62k\") pod \"ceilometer-0\" (UID: 
\"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.422877 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd2a3a77-7cef-426e-a49c-939df192076b-run-httpd\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.423069 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd2a3a77-7cef-426e-a49c-939df192076b-log-httpd\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.423206 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd2a3a77-7cef-426e-a49c-939df192076b-run-httpd\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.435573 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-scripts\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.435858 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.435934 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-config-data\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.439499 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.445122 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ln62k\" (UniqueName: \"kubernetes.io/projected/fd2a3a77-7cef-426e-a49c-939df192076b-kube-api-access-ln62k\") pod \"ceilometer-0\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") " pod="openstack/ceilometer-0" Dec 11 08:39:24 crc kubenswrapper[4881]: I1211 08:39:24.540749 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:39:25 crc kubenswrapper[4881]: I1211 08:39:25.018598 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15b38329-9fa4-4050-9d6a-ede621f328d3" path="/var/lib/kubelet/pods/15b38329-9fa4-4050-9d6a-ede621f328d3/volumes" Dec 11 08:39:25 crc kubenswrapper[4881]: W1211 08:39:25.201763 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfd2a3a77_7cef_426e_a49c_939df192076b.slice/crio-a92ee765b461f7336bad6d8df9065cfad3892a3c38ab4fc55c4e080904ac17df WatchSource:0}: Error finding container a92ee765b461f7336bad6d8df9065cfad3892a3c38ab4fc55c4e080904ac17df: Status 404 returned error can't find the container with id a92ee765b461f7336bad6d8df9065cfad3892a3c38ab4fc55c4e080904ac17df Dec 11 08:39:25 crc kubenswrapper[4881]: I1211 08:39:25.211355 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:39:25 crc kubenswrapper[4881]: E1211 08:39:25.324590 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e984e439f24989cdf8f7b0f35994a1bb5660e2301c8cd791b8a8c90727ebe50f" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 11 08:39:25 crc kubenswrapper[4881]: E1211 08:39:25.327056 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e984e439f24989cdf8f7b0f35994a1bb5660e2301c8cd791b8a8c90727ebe50f" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 11 08:39:25 crc kubenswrapper[4881]: E1211 08:39:25.328291 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e984e439f24989cdf8f7b0f35994a1bb5660e2301c8cd791b8a8c90727ebe50f" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 11 08:39:25 crc kubenswrapper[4881]: E1211 08:39:25.328537 4881 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-557bc7cb47-8hntq" podUID="41f75170-bc89-4911-a5f3-5456d3512897" containerName="heat-engine" Dec 11 08:39:25 crc kubenswrapper[4881]: I1211 08:39:25.483437 4881 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podedab6e89-cb03-44e6-b511-d64ba764b857"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podedab6e89-cb03-44e6-b511-d64ba764b857] : Timed out while waiting for systemd to remove kubepods-besteffort-podedab6e89_cb03_44e6_b511_d64ba764b857.slice" Dec 11 08:39:25 crc kubenswrapper[4881]: E1211 08:39:25.483493 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort podedab6e89-cb03-44e6-b511-d64ba764b857] : unable to destroy cgroup paths for cgroup [kubepods besteffort podedab6e89-cb03-44e6-b511-d64ba764b857] : Timed out while waiting for systemd to remove kubepods-besteffort-podedab6e89_cb03_44e6_b511_d64ba764b857.slice" pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" podUID="edab6e89-cb03-44e6-b511-d64ba764b857" Dec 11 08:39:26 crc kubenswrapper[4881]: 
I1211 08:39:26.099221 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-js2tw" Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.100195 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fd2a3a77-7cef-426e-a49c-939df192076b","Type":"ContainerStarted","Data":"a92ee765b461f7336bad6d8df9065cfad3892a3c38ab4fc55c4e080904ac17df"} Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.151181 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-js2tw"] Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.163580 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-js2tw"] Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.762606 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6jtqx"] Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.764844 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6jtqx" Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.767194 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.769642 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-vzncb" Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.774046 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.775937 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-6jtqx\" (UID: \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\") " pod="openstack/nova-cell0-conductor-db-sync-6jtqx" Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.776212 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-config-data\") pod \"nova-cell0-conductor-db-sync-6jtqx\" (UID: \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\") " pod="openstack/nova-cell0-conductor-db-sync-6jtqx" Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.776425 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-scripts\") pod \"nova-cell0-conductor-db-sync-6jtqx\" (UID: \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\") " pod="openstack/nova-cell0-conductor-db-sync-6jtqx" Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.776526 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8q7f\" (UniqueName: \"kubernetes.io/projected/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-kube-api-access-l8q7f\") pod \"nova-cell0-conductor-db-sync-6jtqx\" (UID: \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\") " pod="openstack/nova-cell0-conductor-db-sync-6jtqx" Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.779919 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6jtqx"] Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.879141 
4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-config-data\") pod \"nova-cell0-conductor-db-sync-6jtqx\" (UID: \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\") " pod="openstack/nova-cell0-conductor-db-sync-6jtqx" Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.879236 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-scripts\") pod \"nova-cell0-conductor-db-sync-6jtqx\" (UID: \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\") " pod="openstack/nova-cell0-conductor-db-sync-6jtqx" Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.879284 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8q7f\" (UniqueName: \"kubernetes.io/projected/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-kube-api-access-l8q7f\") pod \"nova-cell0-conductor-db-sync-6jtqx\" (UID: \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\") " pod="openstack/nova-cell0-conductor-db-sync-6jtqx" Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.879371 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-6jtqx\" (UID: \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\") " pod="openstack/nova-cell0-conductor-db-sync-6jtqx" Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.885184 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-config-data\") pod \"nova-cell0-conductor-db-sync-6jtqx\" (UID: \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\") " pod="openstack/nova-cell0-conductor-db-sync-6jtqx" Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.885207 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-6jtqx\" (UID: \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\") " pod="openstack/nova-cell0-conductor-db-sync-6jtqx" Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.896941 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-scripts\") pod \"nova-cell0-conductor-db-sync-6jtqx\" (UID: \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\") " pod="openstack/nova-cell0-conductor-db-sync-6jtqx" Dec 11 08:39:26 crc kubenswrapper[4881]: I1211 08:39:26.902487 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8q7f\" (UniqueName: \"kubernetes.io/projected/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-kube-api-access-l8q7f\") pod \"nova-cell0-conductor-db-sync-6jtqx\" (UID: \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\") " pod="openstack/nova-cell0-conductor-db-sync-6jtqx" Dec 11 08:39:27 crc kubenswrapper[4881]: I1211 08:39:27.018969 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edab6e89-cb03-44e6-b511-d64ba764b857" path="/var/lib/kubelet/pods/edab6e89-cb03-44e6-b511-d64ba764b857/volumes" Dec 11 08:39:27 crc kubenswrapper[4881]: I1211 08:39:27.091720 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6jtqx" Dec 11 08:39:29 crc kubenswrapper[4881]: I1211 08:39:29.397222 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:39:29 crc kubenswrapper[4881]: I1211 08:39:29.397871 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:39:30 crc kubenswrapper[4881]: I1211 08:39:30.870165 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6jtqx"] Dec 11 08:39:31 crc kubenswrapper[4881]: I1211 08:39:31.152904 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fd2a3a77-7cef-426e-a49c-939df192076b","Type":"ContainerStarted","Data":"913c623aed0e1abb5e5b51c361702f7e2bf90a58cd2bc66cb4bbc64cdb00bc76"} Dec 11 08:39:31 crc kubenswrapper[4881]: I1211 08:39:31.154889 4881 generic.go:334] "Generic (PLEG): container finished" podID="41f75170-bc89-4911-a5f3-5456d3512897" containerID="e984e439f24989cdf8f7b0f35994a1bb5660e2301c8cd791b8a8c90727ebe50f" exitCode=0 Dec 11 08:39:31 crc kubenswrapper[4881]: I1211 08:39:31.154966 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-557bc7cb47-8hntq" event={"ID":"41f75170-bc89-4911-a5f3-5456d3512897","Type":"ContainerDied","Data":"e984e439f24989cdf8f7b0f35994a1bb5660e2301c8cd791b8a8c90727ebe50f"} Dec 11 08:39:31 crc kubenswrapper[4881]: I1211 08:39:31.156374 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6jtqx" event={"ID":"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933","Type":"ContainerStarted","Data":"88da7ccb69135e338fb6b7a9d3308c7bebe9a6add0253752440521396a2b6326"} Dec 11 08:39:31 crc kubenswrapper[4881]: I1211 08:39:31.972771 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-557bc7cb47-8hntq" Dec 11 08:39:32 crc kubenswrapper[4881]: I1211 08:39:32.046040 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45brw\" (UniqueName: \"kubernetes.io/projected/41f75170-bc89-4911-a5f3-5456d3512897-kube-api-access-45brw\") pod \"41f75170-bc89-4911-a5f3-5456d3512897\" (UID: \"41f75170-bc89-4911-a5f3-5456d3512897\") " Dec 11 08:39:32 crc kubenswrapper[4881]: I1211 08:39:32.046230 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-combined-ca-bundle\") pod \"41f75170-bc89-4911-a5f3-5456d3512897\" (UID: \"41f75170-bc89-4911-a5f3-5456d3512897\") " Dec 11 08:39:32 crc kubenswrapper[4881]: I1211 08:39:32.046438 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-config-data-custom\") pod \"41f75170-bc89-4911-a5f3-5456d3512897\" (UID: \"41f75170-bc89-4911-a5f3-5456d3512897\") " Dec 11 08:39:32 crc kubenswrapper[4881]: I1211 08:39:32.046543 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-config-data\") pod \"41f75170-bc89-4911-a5f3-5456d3512897\" (UID: \"41f75170-bc89-4911-a5f3-5456d3512897\") " Dec 11 08:39:32 crc kubenswrapper[4881]: I1211 08:39:32.055015 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "41f75170-bc89-4911-a5f3-5456d3512897" (UID: "41f75170-bc89-4911-a5f3-5456d3512897"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:39:32 crc kubenswrapper[4881]: I1211 08:39:32.069574 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41f75170-bc89-4911-a5f3-5456d3512897-kube-api-access-45brw" (OuterVolumeSpecName: "kube-api-access-45brw") pod "41f75170-bc89-4911-a5f3-5456d3512897" (UID: "41f75170-bc89-4911-a5f3-5456d3512897"). InnerVolumeSpecName "kube-api-access-45brw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:39:32 crc kubenswrapper[4881]: I1211 08:39:32.150814 4881 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:32 crc kubenswrapper[4881]: I1211 08:39:32.150858 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45brw\" (UniqueName: \"kubernetes.io/projected/41f75170-bc89-4911-a5f3-5456d3512897-kube-api-access-45brw\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:32 crc kubenswrapper[4881]: I1211 08:39:32.154141 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "41f75170-bc89-4911-a5f3-5456d3512897" (UID: "41f75170-bc89-4911-a5f3-5456d3512897"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:39:32 crc kubenswrapper[4881]: I1211 08:39:32.170156 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-config-data" (OuterVolumeSpecName: "config-data") pod "41f75170-bc89-4911-a5f3-5456d3512897" (UID: "41f75170-bc89-4911-a5f3-5456d3512897"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:39:32 crc kubenswrapper[4881]: I1211 08:39:32.173854 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-557bc7cb47-8hntq" event={"ID":"41f75170-bc89-4911-a5f3-5456d3512897","Type":"ContainerDied","Data":"99ef9e8842d16f19d0a64f913afee0b2db9e83ec9c99f5876cc22f4bbf7b25a5"} Dec 11 08:39:32 crc kubenswrapper[4881]: I1211 08:39:32.173900 4881 scope.go:117] "RemoveContainer" containerID="e984e439f24989cdf8f7b0f35994a1bb5660e2301c8cd791b8a8c90727ebe50f" Dec 11 08:39:32 crc kubenswrapper[4881]: I1211 08:39:32.174021 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-557bc7cb47-8hntq" Dec 11 08:39:32 crc kubenswrapper[4881]: I1211 08:39:32.251460 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-557bc7cb47-8hntq"] Dec 11 08:39:32 crc kubenswrapper[4881]: I1211 08:39:32.253132 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:32 crc kubenswrapper[4881]: I1211 08:39:32.253166 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41f75170-bc89-4911-a5f3-5456d3512897-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:39:32 crc kubenswrapper[4881]: I1211 08:39:32.261398 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-557bc7cb47-8hntq"] Dec 11 08:39:33 crc kubenswrapper[4881]: I1211 08:39:33.025422 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41f75170-bc89-4911-a5f3-5456d3512897" path="/var/lib/kubelet/pods/41f75170-bc89-4911-a5f3-5456d3512897/volumes" Dec 11 08:39:36 crc kubenswrapper[4881]: I1211 08:39:36.220824 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fd2a3a77-7cef-426e-a49c-939df192076b","Type":"ContainerStarted","Data":"fef0599414e17159cce005a75955e8f58051c457138e510f77a08edff74773e0"} Dec 11 08:39:36 crc kubenswrapper[4881]: I1211 08:39:36.829486 4881 patch_prober.go:28] interesting pod/oauth-openshift-846dc6fc5d-rv7gt container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.62:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 11 08:39:36 crc kubenswrapper[4881]: I1211 08:39:36.829555 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" podUID="c3868292-0936-4979-bd1f-c9406decb7a8" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.62:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 08:39:37 crc kubenswrapper[4881]: I1211 08:39:37.633785 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:39:40 crc kubenswrapper[4881]: I1211 
08:39:40.266911 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fd2a3a77-7cef-426e-a49c-939df192076b","Type":"ContainerStarted","Data":"972b25f97e86da6a22590571398b541c3da161dd7401077e14fc0d78d7a8a28a"} Dec 11 08:39:46 crc kubenswrapper[4881]: I1211 08:39:46.828813 4881 patch_prober.go:28] interesting pod/oauth-openshift-846dc6fc5d-rv7gt container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.62:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 11 08:39:46 crc kubenswrapper[4881]: I1211 08:39:46.835352 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" podUID="c3868292-0936-4979-bd1f-c9406decb7a8" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.62:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 08:39:49 crc kubenswrapper[4881]: I1211 08:39:49.477807 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fd2a3a77-7cef-426e-a49c-939df192076b","Type":"ContainerStarted","Data":"aec839c2373d063f3af9aee1bc31131bb082438aa8435df45e3a34afa75dcbd7"} Dec 11 08:39:50 crc kubenswrapper[4881]: I1211 08:39:50.491955 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fd2a3a77-7cef-426e-a49c-939df192076b" containerName="ceilometer-central-agent" containerID="cri-o://913c623aed0e1abb5e5b51c361702f7e2bf90a58cd2bc66cb4bbc64cdb00bc76" gracePeriod=30 Dec 11 08:39:50 crc kubenswrapper[4881]: I1211 08:39:50.493228 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6jtqx" event={"ID":"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933","Type":"ContainerStarted","Data":"4aad9c5989f63736bb1737df392cb932ad9795365978ce16a585b9700510a0fa"} Dec 11 08:39:50 crc kubenswrapper[4881]: I1211 08:39:50.493294 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 11 08:39:50 crc kubenswrapper[4881]: I1211 08:39:50.493467 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fd2a3a77-7cef-426e-a49c-939df192076b" containerName="proxy-httpd" containerID="cri-o://aec839c2373d063f3af9aee1bc31131bb082438aa8435df45e3a34afa75dcbd7" gracePeriod=30 Dec 11 08:39:50 crc kubenswrapper[4881]: I1211 08:39:50.493518 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fd2a3a77-7cef-426e-a49c-939df192076b" containerName="sg-core" containerID="cri-o://972b25f97e86da6a22590571398b541c3da161dd7401077e14fc0d78d7a8a28a" gracePeriod=30 Dec 11 08:39:50 crc kubenswrapper[4881]: I1211 08:39:50.493485 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fd2a3a77-7cef-426e-a49c-939df192076b" containerName="ceilometer-notification-agent" containerID="cri-o://fef0599414e17159cce005a75955e8f58051c457138e510f77a08edff74773e0" gracePeriod=30 Dec 11 08:39:50 crc kubenswrapper[4881]: I1211 08:39:50.516718 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-6jtqx" podStartSLOduration=6.914619798 podStartE2EDuration="24.516694655s" podCreationTimestamp="2025-12-11 08:39:26 +0000 UTC" firstStartedPulling="2025-12-11 
08:39:30.870517363 +0000 UTC m=+1419.247886060" lastFinishedPulling="2025-12-11 08:39:48.47259222 +0000 UTC m=+1436.849960917" observedRunningTime="2025-12-11 08:39:50.512914839 +0000 UTC m=+1438.890283546" watchObservedRunningTime="2025-12-11 08:39:50.516694655 +0000 UTC m=+1438.894063352" Dec 11 08:39:50 crc kubenswrapper[4881]: I1211 08:39:50.541528 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.277582703 podStartE2EDuration="26.541508221s" podCreationTimestamp="2025-12-11 08:39:24 +0000 UTC" firstStartedPulling="2025-12-11 08:39:25.205963614 +0000 UTC m=+1413.583332311" lastFinishedPulling="2025-12-11 08:39:48.469889132 +0000 UTC m=+1436.847257829" observedRunningTime="2025-12-11 08:39:50.533462837 +0000 UTC m=+1438.910831544" watchObservedRunningTime="2025-12-11 08:39:50.541508221 +0000 UTC m=+1438.918876918" Dec 11 08:39:51 crc kubenswrapper[4881]: I1211 08:39:51.505825 4881 generic.go:334] "Generic (PLEG): container finished" podID="fd2a3a77-7cef-426e-a49c-939df192076b" containerID="aec839c2373d063f3af9aee1bc31131bb082438aa8435df45e3a34afa75dcbd7" exitCode=0 Dec 11 08:39:51 crc kubenswrapper[4881]: I1211 08:39:51.505964 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fd2a3a77-7cef-426e-a49c-939df192076b","Type":"ContainerDied","Data":"aec839c2373d063f3af9aee1bc31131bb082438aa8435df45e3a34afa75dcbd7"} Dec 11 08:39:52 crc kubenswrapper[4881]: I1211 08:39:52.520200 4881 generic.go:334] "Generic (PLEG): container finished" podID="fd2a3a77-7cef-426e-a49c-939df192076b" containerID="972b25f97e86da6a22590571398b541c3da161dd7401077e14fc0d78d7a8a28a" exitCode=2 Dec 11 08:39:52 crc kubenswrapper[4881]: I1211 08:39:52.520562 4881 generic.go:334] "Generic (PLEG): container finished" podID="fd2a3a77-7cef-426e-a49c-939df192076b" containerID="913c623aed0e1abb5e5b51c361702f7e2bf90a58cd2bc66cb4bbc64cdb00bc76" exitCode=0 Dec 11 08:39:52 crc kubenswrapper[4881]: I1211 08:39:52.520275 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fd2a3a77-7cef-426e-a49c-939df192076b","Type":"ContainerDied","Data":"972b25f97e86da6a22590571398b541c3da161dd7401077e14fc0d78d7a8a28a"} Dec 11 08:39:52 crc kubenswrapper[4881]: I1211 08:39:52.520616 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fd2a3a77-7cef-426e-a49c-939df192076b","Type":"ContainerDied","Data":"913c623aed0e1abb5e5b51c361702f7e2bf90a58cd2bc66cb4bbc64cdb00bc76"} Dec 11 08:39:53 crc kubenswrapper[4881]: I1211 08:39:53.534235 4881 generic.go:334] "Generic (PLEG): container finished" podID="fd2a3a77-7cef-426e-a49c-939df192076b" containerID="fef0599414e17159cce005a75955e8f58051c457138e510f77a08edff74773e0" exitCode=0 Dec 11 08:39:53 crc kubenswrapper[4881]: I1211 08:39:53.534284 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fd2a3a77-7cef-426e-a49c-939df192076b","Type":"ContainerDied","Data":"fef0599414e17159cce005a75955e8f58051c457138e510f77a08edff74773e0"} Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.262226 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.412184 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-sg-core-conf-yaml\") pod \"fd2a3a77-7cef-426e-a49c-939df192076b\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") "
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.413300 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd2a3a77-7cef-426e-a49c-939df192076b-log-httpd\") pod \"fd2a3a77-7cef-426e-a49c-939df192076b\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") "
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.413366 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-combined-ca-bundle\") pod \"fd2a3a77-7cef-426e-a49c-939df192076b\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") "
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.413582 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-scripts\") pod \"fd2a3a77-7cef-426e-a49c-939df192076b\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") "
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.413629 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd2a3a77-7cef-426e-a49c-939df192076b-run-httpd\") pod \"fd2a3a77-7cef-426e-a49c-939df192076b\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") "
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.413714 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ln62k\" (UniqueName: \"kubernetes.io/projected/fd2a3a77-7cef-426e-a49c-939df192076b-kube-api-access-ln62k\") pod \"fd2a3a77-7cef-426e-a49c-939df192076b\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") "
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.413754 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-config-data\") pod \"fd2a3a77-7cef-426e-a49c-939df192076b\" (UID: \"fd2a3a77-7cef-426e-a49c-939df192076b\") "
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.413828 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd2a3a77-7cef-426e-a49c-939df192076b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "fd2a3a77-7cef-426e-a49c-939df192076b" (UID: "fd2a3a77-7cef-426e-a49c-939df192076b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.414669 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd2a3a77-7cef-426e-a49c-939df192076b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "fd2a3a77-7cef-426e-a49c-939df192076b" (UID: "fd2a3a77-7cef-426e-a49c-939df192076b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.415076 4881 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd2a3a77-7cef-426e-a49c-939df192076b-run-httpd\") on node \"crc\" DevicePath \"\""
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.415103 4881 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd2a3a77-7cef-426e-a49c-939df192076b-log-httpd\") on node \"crc\" DevicePath \"\""
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.419712 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-scripts" (OuterVolumeSpecName: "scripts") pod "fd2a3a77-7cef-426e-a49c-939df192076b" (UID: "fd2a3a77-7cef-426e-a49c-939df192076b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.432036 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd2a3a77-7cef-426e-a49c-939df192076b-kube-api-access-ln62k" (OuterVolumeSpecName: "kube-api-access-ln62k") pod "fd2a3a77-7cef-426e-a49c-939df192076b" (UID: "fd2a3a77-7cef-426e-a49c-939df192076b"). InnerVolumeSpecName "kube-api-access-ln62k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.454717 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "fd2a3a77-7cef-426e-a49c-939df192076b" (UID: "fd2a3a77-7cef-426e-a49c-939df192076b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.516966 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-scripts\") on node \"crc\" DevicePath \"\""
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.517007 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ln62k\" (UniqueName: \"kubernetes.io/projected/fd2a3a77-7cef-426e-a49c-939df192076b-kube-api-access-ln62k\") on node \"crc\" DevicePath \"\""
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.517022 4881 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.517325 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fd2a3a77-7cef-426e-a49c-939df192076b" (UID: "fd2a3a77-7cef-426e-a49c-939df192076b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.574987 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fd2a3a77-7cef-426e-a49c-939df192076b","Type":"ContainerDied","Data":"a92ee765b461f7336bad6d8df9065cfad3892a3c38ab4fc55c4e080904ac17df"}
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.575298 4881 scope.go:117] "RemoveContainer" containerID="aec839c2373d063f3af9aee1bc31131bb082438aa8435df45e3a34afa75dcbd7"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.575543 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.578689 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-config-data" (OuterVolumeSpecName: "config-data") pod "fd2a3a77-7cef-426e-a49c-939df192076b" (UID: "fd2a3a77-7cef-426e-a49c-939df192076b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.612748 4881 scope.go:117] "RemoveContainer" containerID="972b25f97e86da6a22590571398b541c3da161dd7401077e14fc0d78d7a8a28a"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.620068 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-config-data\") on node \"crc\" DevicePath \"\""
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.620100 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd2a3a77-7cef-426e-a49c-939df192076b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.657754 4881 scope.go:117] "RemoveContainer" containerID="fef0599414e17159cce005a75955e8f58051c457138e510f77a08edff74773e0"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.696974 4881 scope.go:117] "RemoveContainer" containerID="913c623aed0e1abb5e5b51c361702f7e2bf90a58cd2bc66cb4bbc64cdb00bc76"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.916553 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.925949 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.947415 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:39:57 crc kubenswrapper[4881]: E1211 08:39:57.947897 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd2a3a77-7cef-426e-a49c-939df192076b" containerName="proxy-httpd"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.947918 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd2a3a77-7cef-426e-a49c-939df192076b" containerName="proxy-httpd"
Dec 11 08:39:57 crc kubenswrapper[4881]: E1211 08:39:57.947951 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41f75170-bc89-4911-a5f3-5456d3512897" containerName="heat-engine"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.947959 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="41f75170-bc89-4911-a5f3-5456d3512897" containerName="heat-engine"
Dec 11 08:39:57 crc kubenswrapper[4881]: E1211 08:39:57.947985 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd2a3a77-7cef-426e-a49c-939df192076b" containerName="ceilometer-central-agent"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.947991 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd2a3a77-7cef-426e-a49c-939df192076b" containerName="ceilometer-central-agent"
Dec 11 08:39:57 crc kubenswrapper[4881]: E1211 08:39:57.948013 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd2a3a77-7cef-426e-a49c-939df192076b" containerName="sg-core"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.948020 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd2a3a77-7cef-426e-a49c-939df192076b" containerName="sg-core"
Dec 11 08:39:57 crc kubenswrapper[4881]: E1211 08:39:57.948034 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd2a3a77-7cef-426e-a49c-939df192076b" containerName="ceilometer-notification-agent"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.948042 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd2a3a77-7cef-426e-a49c-939df192076b" containerName="ceilometer-notification-agent"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.948253 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd2a3a77-7cef-426e-a49c-939df192076b" containerName="sg-core"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.948273 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="41f75170-bc89-4911-a5f3-5456d3512897" containerName="heat-engine"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.948291 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd2a3a77-7cef-426e-a49c-939df192076b" containerName="ceilometer-central-agent"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.948300 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd2a3a77-7cef-426e-a49c-939df192076b" containerName="proxy-httpd"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.948308 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd2a3a77-7cef-426e-a49c-939df192076b" containerName="ceilometer-notification-agent"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.950361 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.952434 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.952696 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Dec 11 08:39:57 crc kubenswrapper[4881]: I1211 08:39:57.968232 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.129957 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.130002 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxfw8\" (UniqueName: \"kubernetes.io/projected/928ad409-872d-4979-9820-fec3cae3cfba-kube-api-access-hxfw8\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.130075 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.130121 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-config-data\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.130185 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/928ad409-872d-4979-9820-fec3cae3cfba-log-httpd\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.130325 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-scripts\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.130453 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/928ad409-872d-4979-9820-fec3cae3cfba-run-httpd\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.231959 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/928ad409-872d-4979-9820-fec3cae3cfba-run-httpd\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.232015 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.232039 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxfw8\" (UniqueName: \"kubernetes.io/projected/928ad409-872d-4979-9820-fec3cae3cfba-kube-api-access-hxfw8\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.232079 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.232116 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-config-data\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.232165 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/928ad409-872d-4979-9820-fec3cae3cfba-log-httpd\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.232226 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-scripts\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.232515 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/928ad409-872d-4979-9820-fec3cae3cfba-run-httpd\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.232814 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/928ad409-872d-4979-9820-fec3cae3cfba-log-httpd\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.252745 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.252864 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.253763 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-scripts\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.259560 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-config-data\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.262926 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxfw8\" (UniqueName: \"kubernetes.io/projected/928ad409-872d-4979-9820-fec3cae3cfba-kube-api-access-hxfw8\") pod \"ceilometer-0\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") " pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.269172 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 08:39:58 crc kubenswrapper[4881]: I1211 08:39:58.779465 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:39:59 crc kubenswrapper[4881]: I1211 08:39:59.019524 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd2a3a77-7cef-426e-a49c-939df192076b" path="/var/lib/kubelet/pods/fd2a3a77-7cef-426e-a49c-939df192076b/volumes"
Dec 11 08:39:59 crc kubenswrapper[4881]: I1211 08:39:59.397805 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 08:39:59 crc kubenswrapper[4881]: I1211 08:39:59.398132 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 08:39:59 crc kubenswrapper[4881]: I1211 08:39:59.398184 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh"
Dec 11 08:39:59 crc kubenswrapper[4881]: I1211 08:39:59.399099 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a515bd066db37a4e28461e603c6dd047107539c1d10350095f8c9ca546f164e8"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 11 08:39:59 crc kubenswrapper[4881]: I1211 08:39:59.399143 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://a515bd066db37a4e28461e603c6dd047107539c1d10350095f8c9ca546f164e8" gracePeriod=600
Dec 11 08:39:59 crc kubenswrapper[4881]: I1211 08:39:59.603456 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"928ad409-872d-4979-9820-fec3cae3cfba","Type":"ContainerStarted","Data":"b132b55371cf194963b55580966166cd7fb7d34b34ba289b88b6cbf955cb277e"}
Dec 11 08:40:00 crc kubenswrapper[4881]: I1211 08:40:00.617413 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="a515bd066db37a4e28461e603c6dd047107539c1d10350095f8c9ca546f164e8" exitCode=0
Dec 11 08:40:00 crc kubenswrapper[4881]: I1211 08:40:00.617761 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"a515bd066db37a4e28461e603c6dd047107539c1d10350095f8c9ca546f164e8"}
Dec 11 08:40:00 crc kubenswrapper[4881]: I1211 08:40:00.617858 4881 scope.go:117] "RemoveContainer" containerID="bf6d1efeca37e2539778b6f34be1560f88c12ad867f27057f394c09165f250e9"
Dec 11 08:40:00 crc kubenswrapper[4881]: I1211 08:40:00.964228 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:40:01 crc kubenswrapper[4881]: I1211 08:40:01.636737 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9"}
Dec 11 08:40:05 crc kubenswrapper[4881]: I1211 08:40:05.705815 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"928ad409-872d-4979-9820-fec3cae3cfba","Type":"ContainerStarted","Data":"7c7e4f080d155180b13a369f67aa38793a6dae9326686df5a7ed96024f29cfb9"}
Dec 11 08:40:08 crc kubenswrapper[4881]: I1211 08:40:08.100303 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-n8lqz"]
Dec 11 08:40:08 crc kubenswrapper[4881]: I1211 08:40:08.102112 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-n8lqz"
Dec 11 08:40:08 crc kubenswrapper[4881]: I1211 08:40:08.118507 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-n8lqz"]
Dec 11 08:40:08 crc kubenswrapper[4881]: I1211 08:40:08.270544 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhxbg\" (UniqueName: \"kubernetes.io/projected/82b87f02-5631-4cbe-be7a-22cf0e321e23-kube-api-access-dhxbg\") pod \"aodh-db-create-n8lqz\" (UID: \"82b87f02-5631-4cbe-be7a-22cf0e321e23\") " pod="openstack/aodh-db-create-n8lqz"
Dec 11 08:40:08 crc kubenswrapper[4881]: I1211 08:40:08.373043 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhxbg\" (UniqueName: \"kubernetes.io/projected/82b87f02-5631-4cbe-be7a-22cf0e321e23-kube-api-access-dhxbg\") pod \"aodh-db-create-n8lqz\" (UID: \"82b87f02-5631-4cbe-be7a-22cf0e321e23\") " pod="openstack/aodh-db-create-n8lqz"
Dec 11 08:40:08 crc kubenswrapper[4881]: I1211 08:40:08.402952 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhxbg\" (UniqueName: \"kubernetes.io/projected/82b87f02-5631-4cbe-be7a-22cf0e321e23-kube-api-access-dhxbg\") pod \"aodh-db-create-n8lqz\" (UID: \"82b87f02-5631-4cbe-be7a-22cf0e321e23\") " pod="openstack/aodh-db-create-n8lqz"
Dec 11 08:40:08 crc kubenswrapper[4881]: I1211 08:40:08.435381 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-n8lqz"
Dec 11 08:40:10 crc kubenswrapper[4881]: I1211 08:40:10.819053 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-n8lqz"]
Dec 11 08:40:11 crc kubenswrapper[4881]: I1211 08:40:11.769351 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-n8lqz" event={"ID":"82b87f02-5631-4cbe-be7a-22cf0e321e23","Type":"ContainerStarted","Data":"b188af95160a9d5d65b1d9acedf4216339b2f9d8b73eca39b85e7c282e5ca005"}
Dec 11 08:40:12 crc kubenswrapper[4881]: I1211 08:40:12.781645 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-n8lqz" event={"ID":"82b87f02-5631-4cbe-be7a-22cf0e321e23","Type":"ContainerStarted","Data":"e34cb666a0b9bfe93c61de10e1acebf64b2f12870292850ba40f91470fbebf50"}
Dec 11 08:40:13 crc kubenswrapper[4881]: I1211 08:40:13.820299 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-create-n8lqz" podStartSLOduration=5.820281943 podStartE2EDuration="5.820281943s" podCreationTimestamp="2025-12-11 08:40:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:40:13.810403914 +0000 UTC m=+1462.187772621" watchObservedRunningTime="2025-12-11 08:40:13.820281943 +0000 UTC m=+1462.197650640"
Dec 11 08:40:14 crc kubenswrapper[4881]: I1211 08:40:14.804861 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"928ad409-872d-4979-9820-fec3cae3cfba","Type":"ContainerStarted","Data":"a2ebc98b836800fb91752a65af0489007a962443b301cf909dea997cbe413bdb"}
Dec 11 08:40:14 crc kubenswrapper[4881]: I1211 08:40:14.807476 4881 generic.go:334] "Generic (PLEG): container finished" podID="82b87f02-5631-4cbe-be7a-22cf0e321e23" containerID="e34cb666a0b9bfe93c61de10e1acebf64b2f12870292850ba40f91470fbebf50" exitCode=0
Dec 11 08:40:14 crc kubenswrapper[4881]: I1211 08:40:14.807531 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-n8lqz" event={"ID":"82b87f02-5631-4cbe-be7a-22cf0e321e23","Type":"ContainerDied","Data":"e34cb666a0b9bfe93c61de10e1acebf64b2f12870292850ba40f91470fbebf50"}
Dec 11 08:40:15 crc kubenswrapper[4881]: I1211 08:40:15.832419 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"928ad409-872d-4979-9820-fec3cae3cfba","Type":"ContainerStarted","Data":"434d31838e5479f57a2538c65aa1c2ca1b2f297976f5bdc74f5deae3db6b626a"}
Dec 11 08:40:16 crc kubenswrapper[4881]: I1211 08:40:16.249203 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-n8lqz"
Dec 11 08:40:16 crc kubenswrapper[4881]: I1211 08:40:16.410192 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dhxbg\" (UniqueName: \"kubernetes.io/projected/82b87f02-5631-4cbe-be7a-22cf0e321e23-kube-api-access-dhxbg\") pod \"82b87f02-5631-4cbe-be7a-22cf0e321e23\" (UID: \"82b87f02-5631-4cbe-be7a-22cf0e321e23\") "
Dec 11 08:40:16 crc kubenswrapper[4881]: I1211 08:40:16.424705 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82b87f02-5631-4cbe-be7a-22cf0e321e23-kube-api-access-dhxbg" (OuterVolumeSpecName: "kube-api-access-dhxbg") pod "82b87f02-5631-4cbe-be7a-22cf0e321e23" (UID: "82b87f02-5631-4cbe-be7a-22cf0e321e23"). InnerVolumeSpecName "kube-api-access-dhxbg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:40:16 crc kubenswrapper[4881]: I1211 08:40:16.512748 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dhxbg\" (UniqueName: \"kubernetes.io/projected/82b87f02-5631-4cbe-be7a-22cf0e321e23-kube-api-access-dhxbg\") on node \"crc\" DevicePath \"\""
Dec 11 08:40:16 crc kubenswrapper[4881]: I1211 08:40:16.860536 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-n8lqz" event={"ID":"82b87f02-5631-4cbe-be7a-22cf0e321e23","Type":"ContainerDied","Data":"b188af95160a9d5d65b1d9acedf4216339b2f9d8b73eca39b85e7c282e5ca005"}
Dec 11 08:40:16 crc kubenswrapper[4881]: I1211 08:40:16.860585 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b188af95160a9d5d65b1d9acedf4216339b2f9d8b73eca39b85e7c282e5ca005"
Dec 11 08:40:16 crc kubenswrapper[4881]: I1211 08:40:16.860615 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-n8lqz"
Dec 11 08:40:21 crc kubenswrapper[4881]: I1211 08:40:21.927156 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"928ad409-872d-4979-9820-fec3cae3cfba","Type":"ContainerStarted","Data":"f90ae80a51f7fba5d13fbe54303d1bcbd1c7da8cc5895b8137e29e04c4905352"}
Dec 11 08:40:21 crc kubenswrapper[4881]: I1211 08:40:21.929439 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Dec 11 08:40:21 crc kubenswrapper[4881]: I1211 08:40:21.927541 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="928ad409-872d-4979-9820-fec3cae3cfba" containerName="sg-core" containerID="cri-o://434d31838e5479f57a2538c65aa1c2ca1b2f297976f5bdc74f5deae3db6b626a" gracePeriod=30
Dec 11 08:40:21 crc kubenswrapper[4881]: I1211 08:40:21.927654 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="928ad409-872d-4979-9820-fec3cae3cfba" containerName="ceilometer-notification-agent" containerID="cri-o://a2ebc98b836800fb91752a65af0489007a962443b301cf909dea997cbe413bdb" gracePeriod=30
Dec 11 08:40:21 crc kubenswrapper[4881]: I1211 08:40:21.927683 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="928ad409-872d-4979-9820-fec3cae3cfba" containerName="proxy-httpd" containerID="cri-o://f90ae80a51f7fba5d13fbe54303d1bcbd1c7da8cc5895b8137e29e04c4905352" gracePeriod=30
Dec 11 08:40:21 crc kubenswrapper[4881]: I1211 08:40:21.927386 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="928ad409-872d-4979-9820-fec3cae3cfba" containerName="ceilometer-central-agent" containerID="cri-o://7c7e4f080d155180b13a369f67aa38793a6dae9326686df5a7ed96024f29cfb9" gracePeriod=30
Dec 11 08:40:21 crc kubenswrapper[4881]: I1211 08:40:21.959755 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.7405597459999997 podStartE2EDuration="24.959727919s" podCreationTimestamp="2025-12-11 08:39:57 +0000 UTC" firstStartedPulling="2025-12-11 08:39:58.766488563 +0000 UTC m=+1447.143857260" lastFinishedPulling="2025-12-11 08:40:20.985656736 +0000 UTC m=+1469.363025433" observedRunningTime="2025-12-11 08:40:21.95104793 +0000 UTC m=+1470.328416657" watchObservedRunningTime="2025-12-11 08:40:21.959727919 +0000 UTC m=+1470.337096616"
Dec 11 08:40:22 crc kubenswrapper[4881]: I1211 08:40:22.940925 4881 generic.go:334] "Generic (PLEG): container finished" podID="928ad409-872d-4979-9820-fec3cae3cfba" containerID="f90ae80a51f7fba5d13fbe54303d1bcbd1c7da8cc5895b8137e29e04c4905352" exitCode=0
Dec 11 08:40:22 crc kubenswrapper[4881]: I1211 08:40:22.940966 4881 generic.go:334] "Generic (PLEG): container finished" podID="928ad409-872d-4979-9820-fec3cae3cfba" containerID="434d31838e5479f57a2538c65aa1c2ca1b2f297976f5bdc74f5deae3db6b626a" exitCode=2
Dec 11 08:40:22 crc kubenswrapper[4881]: I1211 08:40:22.940978 4881 generic.go:334] "Generic (PLEG): container finished" podID="928ad409-872d-4979-9820-fec3cae3cfba" containerID="a2ebc98b836800fb91752a65af0489007a962443b301cf909dea997cbe413bdb" exitCode=0
Dec 11 08:40:22 crc kubenswrapper[4881]: I1211 08:40:22.940987 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"928ad409-872d-4979-9820-fec3cae3cfba","Type":"ContainerDied","Data":"f90ae80a51f7fba5d13fbe54303d1bcbd1c7da8cc5895b8137e29e04c4905352"}
Dec 11 08:40:22 crc kubenswrapper[4881]: I1211 08:40:22.941026 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"928ad409-872d-4979-9820-fec3cae3cfba","Type":"ContainerDied","Data":"434d31838e5479f57a2538c65aa1c2ca1b2f297976f5bdc74f5deae3db6b626a"}
Dec 11 08:40:22 crc kubenswrapper[4881]: I1211 08:40:22.941037 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"928ad409-872d-4979-9820-fec3cae3cfba","Type":"ContainerDied","Data":"a2ebc98b836800fb91752a65af0489007a962443b301cf909dea997cbe413bdb"}
Dec 11 08:40:25 crc kubenswrapper[4881]: E1211 08:40:25.421906 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod928ad409_872d_4979_9820_fec3cae3cfba.slice/crio-7c7e4f080d155180b13a369f67aa38793a6dae9326686df5a7ed96024f29cfb9.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod928ad409_872d_4979_9820_fec3cae3cfba.slice/crio-conmon-7c7e4f080d155180b13a369f67aa38793a6dae9326686df5a7ed96024f29cfb9.scope\": RecentStats: unable to find data in memory cache]"
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.650060 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.733166 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-config-data\") pod \"928ad409-872d-4979-9820-fec3cae3cfba\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") "
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.733265 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/928ad409-872d-4979-9820-fec3cae3cfba-run-httpd\") pod \"928ad409-872d-4979-9820-fec3cae3cfba\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") "
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.733376 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-scripts\") pod \"928ad409-872d-4979-9820-fec3cae3cfba\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") "
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.733429 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxfw8\" (UniqueName: \"kubernetes.io/projected/928ad409-872d-4979-9820-fec3cae3cfba-kube-api-access-hxfw8\") pod \"928ad409-872d-4979-9820-fec3cae3cfba\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") "
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.733543 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-sg-core-conf-yaml\") pod \"928ad409-872d-4979-9820-fec3cae3cfba\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") "
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.733596 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/928ad409-872d-4979-9820-fec3cae3cfba-log-httpd\") pod \"928ad409-872d-4979-9820-fec3cae3cfba\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") "
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.733633 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-combined-ca-bundle\") pod \"928ad409-872d-4979-9820-fec3cae3cfba\" (UID: \"928ad409-872d-4979-9820-fec3cae3cfba\") "
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.733759 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/928ad409-872d-4979-9820-fec3cae3cfba-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "928ad409-872d-4979-9820-fec3cae3cfba" (UID: "928ad409-872d-4979-9820-fec3cae3cfba"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.734130 4881 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/928ad409-872d-4979-9820-fec3cae3cfba-run-httpd\") on node \"crc\" DevicePath \"\""
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.734665 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/928ad409-872d-4979-9820-fec3cae3cfba-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "928ad409-872d-4979-9820-fec3cae3cfba" (UID: "928ad409-872d-4979-9820-fec3cae3cfba"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.738692 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/928ad409-872d-4979-9820-fec3cae3cfba-kube-api-access-hxfw8" (OuterVolumeSpecName: "kube-api-access-hxfw8") pod "928ad409-872d-4979-9820-fec3cae3cfba" (UID: "928ad409-872d-4979-9820-fec3cae3cfba"). InnerVolumeSpecName "kube-api-access-hxfw8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.739699 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-scripts" (OuterVolumeSpecName: "scripts") pod "928ad409-872d-4979-9820-fec3cae3cfba" (UID: "928ad409-872d-4979-9820-fec3cae3cfba"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.771420 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "928ad409-872d-4979-9820-fec3cae3cfba" (UID: "928ad409-872d-4979-9820-fec3cae3cfba"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.835404 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "928ad409-872d-4979-9820-fec3cae3cfba" (UID: "928ad409-872d-4979-9820-fec3cae3cfba"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.835982 4881 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/928ad409-872d-4979-9820-fec3cae3cfba-log-httpd\") on node \"crc\" DevicePath \"\""
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.836014 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.836025 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-scripts\") on node \"crc\" DevicePath \"\""
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.836033 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxfw8\" (UniqueName: \"kubernetes.io/projected/928ad409-872d-4979-9820-fec3cae3cfba-kube-api-access-hxfw8\") on node \"crc\" DevicePath \"\""
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.836043 4881 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.856495 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-config-data" (OuterVolumeSpecName: "config-data") pod "928ad409-872d-4979-9820-fec3cae3cfba" (UID: "928ad409-872d-4979-9820-fec3cae3cfba"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.938018 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/928ad409-872d-4979-9820-fec3cae3cfba-config-data\") on node \"crc\" DevicePath \"\""
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.981873 4881 generic.go:334] "Generic (PLEG): container finished" podID="928ad409-872d-4979-9820-fec3cae3cfba" containerID="7c7e4f080d155180b13a369f67aa38793a6dae9326686df5a7ed96024f29cfb9" exitCode=0
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.981930 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"928ad409-872d-4979-9820-fec3cae3cfba","Type":"ContainerDied","Data":"7c7e4f080d155180b13a369f67aa38793a6dae9326686df5a7ed96024f29cfb9"}
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.981964 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"928ad409-872d-4979-9820-fec3cae3cfba","Type":"ContainerDied","Data":"b132b55371cf194963b55580966166cd7fb7d34b34ba289b88b6cbf955cb277e"}
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.981986 4881 scope.go:117] "RemoveContainer" containerID="f90ae80a51f7fba5d13fbe54303d1bcbd1c7da8cc5895b8137e29e04c4905352"
Dec 11 08:40:25 crc kubenswrapper[4881]: I1211 08:40:25.982002 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.001131 4881 scope.go:117] "RemoveContainer" containerID="434d31838e5479f57a2538c65aa1c2ca1b2f297976f5bdc74f5deae3db6b626a"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.025903 4881 scope.go:117] "RemoveContainer" containerID="a2ebc98b836800fb91752a65af0489007a962443b301cf909dea997cbe413bdb"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.026955 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.045753 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.061948 4881 scope.go:117] "RemoveContainer" containerID="7c7e4f080d155180b13a369f67aa38793a6dae9326686df5a7ed96024f29cfb9"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.063361 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:40:26 crc kubenswrapper[4881]: E1211 08:40:26.063865 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="928ad409-872d-4979-9820-fec3cae3cfba" containerName="ceilometer-central-agent"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.063884 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="928ad409-872d-4979-9820-fec3cae3cfba" containerName="ceilometer-central-agent"
Dec 11 08:40:26 crc kubenswrapper[4881]: E1211 08:40:26.063899 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="928ad409-872d-4979-9820-fec3cae3cfba" containerName="proxy-httpd"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.063906 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="928ad409-872d-4979-9820-fec3cae3cfba" containerName="proxy-httpd"
Dec 11 08:40:26 crc kubenswrapper[4881]: E1211 08:40:26.063924 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82b87f02-5631-4cbe-be7a-22cf0e321e23" containerName="mariadb-database-create"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.063931 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="82b87f02-5631-4cbe-be7a-22cf0e321e23" containerName="mariadb-database-create"
Dec 11 08:40:26 crc kubenswrapper[4881]: E1211 08:40:26.063942 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="928ad409-872d-4979-9820-fec3cae3cfba" containerName="sg-core"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.063948 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="928ad409-872d-4979-9820-fec3cae3cfba" containerName="sg-core"
Dec 11 08:40:26 crc kubenswrapper[4881]: E1211 08:40:26.063996 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="928ad409-872d-4979-9820-fec3cae3cfba" containerName="ceilometer-notification-agent"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.064007 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="928ad409-872d-4979-9820-fec3cae3cfba" containerName="ceilometer-notification-agent"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.064215 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="928ad409-872d-4979-9820-fec3cae3cfba" containerName="ceilometer-central-agent"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.064242 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="928ad409-872d-4979-9820-fec3cae3cfba" containerName="proxy-httpd"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.064256 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="82b87f02-5631-4cbe-be7a-22cf0e321e23" containerName="mariadb-database-create"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.064267 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="928ad409-872d-4979-9820-fec3cae3cfba" containerName="sg-core"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.064281 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="928ad409-872d-4979-9820-fec3cae3cfba" containerName="ceilometer-notification-agent"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.066476 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.076930 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.078504 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.082901 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.085461 4881 scope.go:117] "RemoveContainer" containerID="f90ae80a51f7fba5d13fbe54303d1bcbd1c7da8cc5895b8137e29e04c4905352"
Dec 11 08:40:26 crc kubenswrapper[4881]: E1211 08:40:26.085970 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f90ae80a51f7fba5d13fbe54303d1bcbd1c7da8cc5895b8137e29e04c4905352\": container with ID starting with f90ae80a51f7fba5d13fbe54303d1bcbd1c7da8cc5895b8137e29e04c4905352 not found: ID does not exist" containerID="f90ae80a51f7fba5d13fbe54303d1bcbd1c7da8cc5895b8137e29e04c4905352"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.086007 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f90ae80a51f7fba5d13fbe54303d1bcbd1c7da8cc5895b8137e29e04c4905352"} err="failed to get container status \"f90ae80a51f7fba5d13fbe54303d1bcbd1c7da8cc5895b8137e29e04c4905352\": rpc error: code = NotFound desc = could not find container \"f90ae80a51f7fba5d13fbe54303d1bcbd1c7da8cc5895b8137e29e04c4905352\": container with ID starting with f90ae80a51f7fba5d13fbe54303d1bcbd1c7da8cc5895b8137e29e04c4905352 not found: ID does not exist"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.086035 4881 scope.go:117] "RemoveContainer" containerID="434d31838e5479f57a2538c65aa1c2ca1b2f297976f5bdc74f5deae3db6b626a"
Dec 11 08:40:26 crc kubenswrapper[4881]: E1211 08:40:26.086378 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"434d31838e5479f57a2538c65aa1c2ca1b2f297976f5bdc74f5deae3db6b626a\": container with ID starting with 434d31838e5479f57a2538c65aa1c2ca1b2f297976f5bdc74f5deae3db6b626a not found: ID does not exist" containerID="434d31838e5479f57a2538c65aa1c2ca1b2f297976f5bdc74f5deae3db6b626a"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.086408 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"434d31838e5479f57a2538c65aa1c2ca1b2f297976f5bdc74f5deae3db6b626a"} err="failed to get container status \"434d31838e5479f57a2538c65aa1c2ca1b2f297976f5bdc74f5deae3db6b626a\": rpc error: code = NotFound desc = could not find container \"434d31838e5479f57a2538c65aa1c2ca1b2f297976f5bdc74f5deae3db6b626a\": container with ID starting with 434d31838e5479f57a2538c65aa1c2ca1b2f297976f5bdc74f5deae3db6b626a not found: ID does not exist"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.086423 4881 scope.go:117] "RemoveContainer" containerID="a2ebc98b836800fb91752a65af0489007a962443b301cf909dea997cbe413bdb"
Dec 11 08:40:26 crc kubenswrapper[4881]: E1211 08:40:26.086828 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2ebc98b836800fb91752a65af0489007a962443b301cf909dea997cbe413bdb\": container with ID starting with a2ebc98b836800fb91752a65af0489007a962443b301cf909dea997cbe413bdb not found: ID does not exist" containerID="a2ebc98b836800fb91752a65af0489007a962443b301cf909dea997cbe413bdb"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.086849 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2ebc98b836800fb91752a65af0489007a962443b301cf909dea997cbe413bdb"} err="failed to get container status \"a2ebc98b836800fb91752a65af0489007a962443b301cf909dea997cbe413bdb\": rpc error: code = NotFound desc = could not find container \"a2ebc98b836800fb91752a65af0489007a962443b301cf909dea997cbe413bdb\": container with ID starting with a2ebc98b836800fb91752a65af0489007a962443b301cf909dea997cbe413bdb not found: ID does not exist"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.086863 4881 scope.go:117] "RemoveContainer" containerID="7c7e4f080d155180b13a369f67aa38793a6dae9326686df5a7ed96024f29cfb9"
Dec 11 08:40:26 crc kubenswrapper[4881]: E1211 08:40:26.087170 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c7e4f080d155180b13a369f67aa38793a6dae9326686df5a7ed96024f29cfb9\": container with ID starting with 7c7e4f080d155180b13a369f67aa38793a6dae9326686df5a7ed96024f29cfb9 not found: ID does not exist" containerID="7c7e4f080d155180b13a369f67aa38793a6dae9326686df5a7ed96024f29cfb9"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.087189 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c7e4f080d155180b13a369f67aa38793a6dae9326686df5a7ed96024f29cfb9"} err="failed to get container status \"7c7e4f080d155180b13a369f67aa38793a6dae9326686df5a7ed96024f29cfb9\": rpc error: code = NotFound desc = could not find container \"7c7e4f080d155180b13a369f67aa38793a6dae9326686df5a7ed96024f29cfb9\": container with ID starting with 7c7e4f080d155180b13a369f67aa38793a6dae9326686df5a7ed96024f29cfb9 not found: ID does not exist"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.250273 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-scripts\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.250385 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.250778 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qc66q\" (UniqueName: \"kubernetes.io/projected/7c22dd13-8a15-412b-a351-851d91a763ba-kube-api-access-qc66q\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.250874 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-config-data\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.250962 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.251014 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c22dd13-8a15-412b-a351-851d91a763ba-run-httpd\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.251102 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c22dd13-8a15-412b-a351-851d91a763ba-log-httpd\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.353146 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-scripts\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.353231 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.353355 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qc66q\" (UniqueName: \"kubernetes.io/projected/7c22dd13-8a15-412b-a351-851d91a763ba-kube-api-access-qc66q\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.353403 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-config-data\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.353447 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.353549 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c22dd13-8a15-412b-a351-851d91a763ba-run-httpd\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.353602 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c22dd13-8a15-412b-a351-851d91a763ba-log-httpd\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.354104 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c22dd13-8a15-412b-a351-851d91a763ba-run-httpd\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.356289 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c22dd13-8a15-412b-a351-851d91a763ba-log-httpd\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.357121 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.358145 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.358649 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-config-data\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.368627 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-scripts\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.372979 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qc66q\" (UniqueName: \"kubernetes.io/projected/7c22dd13-8a15-412b-a351-851d91a763ba-kube-api-access-qc66q\") pod \"ceilometer-0\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") " pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.401805 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 08:40:26 crc kubenswrapper[4881]: I1211 08:40:26.915148 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:40:27 crc kubenswrapper[4881]: I1211 08:40:27.020087 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="928ad409-872d-4979-9820-fec3cae3cfba" path="/var/lib/kubelet/pods/928ad409-872d-4979-9820-fec3cae3cfba/volumes"
Dec 11 08:40:27 crc kubenswrapper[4881]: I1211 08:40:27.021224 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c22dd13-8a15-412b-a351-851d91a763ba","Type":"ContainerStarted","Data":"1e60923b8a101ca74eb8a0796f23df21a0eaf6ec68c345252198c77716e9cd23"}
Dec 11 08:40:32 crc kubenswrapper[4881]: I1211 08:40:32.704080 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-3e52-account-create-82sqk"]
Dec 11 08:40:32 crc kubenswrapper[4881]: I1211 08:40:32.706400 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-3e52-account-create-82sqk"
Dec 11 08:40:32 crc kubenswrapper[4881]: I1211 08:40:32.708782 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret"
Dec 11 08:40:32 crc kubenswrapper[4881]: I1211 08:40:32.725676 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-3e52-account-create-82sqk"]
Dec 11 08:40:32 crc kubenswrapper[4881]: I1211 08:40:32.812467 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4kw6\" (UniqueName: \"kubernetes.io/projected/04df1111-8ddd-4102-9ab8-103767f09410-kube-api-access-v4kw6\") pod \"aodh-3e52-account-create-82sqk\" (UID: \"04df1111-8ddd-4102-9ab8-103767f09410\") " pod="openstack/aodh-3e52-account-create-82sqk"
Dec 11 08:40:32 crc kubenswrapper[4881]: I1211 08:40:32.914857 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4kw6\" (UniqueName: \"kubernetes.io/projected/04df1111-8ddd-4102-9ab8-103767f09410-kube-api-access-v4kw6\") pod \"aodh-3e52-account-create-82sqk\" (UID: \"04df1111-8ddd-4102-9ab8-103767f09410\") " pod="openstack/aodh-3e52-account-create-82sqk"
Dec 11 08:40:32 crc kubenswrapper[4881]: I1211 08:40:32.936871 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4kw6\" (UniqueName: \"kubernetes.io/projected/04df1111-8ddd-4102-9ab8-103767f09410-kube-api-access-v4kw6\") pod \"aodh-3e52-account-create-82sqk\" (UID: \"04df1111-8ddd-4102-9ab8-103767f09410\") " pod="openstack/aodh-3e52-account-create-82sqk"
Dec 11 08:40:33 crc kubenswrapper[4881]: I1211 08:40:33.033478 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-3e52-account-create-82sqk"
Dec 11 08:40:33 crc kubenswrapper[4881]: I1211 08:40:33.545493 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-3e52-account-create-82sqk"]
Dec 11 08:40:34 crc kubenswrapper[4881]: I1211 08:40:34.111868 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-3e52-account-create-82sqk" event={"ID":"04df1111-8ddd-4102-9ab8-103767f09410","Type":"ContainerStarted","Data":"4f6ff7738d68f7848ce185b77c8dd6b81634f3f43ebca3007990f2d203d86fb2"}
Dec 11 08:40:34 crc kubenswrapper[4881]: I1211 08:40:34.113096 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-3e52-account-create-82sqk" event={"ID":"04df1111-8ddd-4102-9ab8-103767f09410","Type":"ContainerStarted","Data":"97a39810db342c485a6f0668710f0ed9ead3b257891d4c77001e77c3b26f2ac8"}
Dec 11 08:40:34 crc kubenswrapper[4881]: I1211 08:40:34.116190 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c22dd13-8a15-412b-a351-851d91a763ba","Type":"ContainerStarted","Data":"16efdf8e4070fcffa5289512b3f3b3d42643ad9087c9ee2d8117b428fe2aa04f"}
Dec 11 08:40:37 crc kubenswrapper[4881]: I1211 08:40:37.149552 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c22dd13-8a15-412b-a351-851d91a763ba","Type":"ContainerStarted","Data":"2a3a1064d42f444ae15ac25f5dbf69b384a98da7dd40d5787d27be2c49036beb"}
Dec 11 08:40:37 crc kubenswrapper[4881]: I1211 08:40:37.151791 4881 generic.go:334] "Generic (PLEG): container finished" podID="04df1111-8ddd-4102-9ab8-103767f09410" containerID="4f6ff7738d68f7848ce185b77c8dd6b81634f3f43ebca3007990f2d203d86fb2" exitCode=0
Dec 11 08:40:37 crc kubenswrapper[4881]: I1211 08:40:37.151833 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-3e52-account-create-82sqk" event={"ID":"04df1111-8ddd-4102-9ab8-103767f09410","Type":"ContainerDied","Data":"4f6ff7738d68f7848ce185b77c8dd6b81634f3f43ebca3007990f2d203d86fb2"}
Dec 11 08:40:38 crc kubenswrapper[4881]: I1211 08:40:38.704240 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-3e52-account-create-82sqk"
Dec 11 08:40:38 crc kubenswrapper[4881]: I1211 08:40:38.892410 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4kw6\" (UniqueName: \"kubernetes.io/projected/04df1111-8ddd-4102-9ab8-103767f09410-kube-api-access-v4kw6\") pod \"04df1111-8ddd-4102-9ab8-103767f09410\" (UID: \"04df1111-8ddd-4102-9ab8-103767f09410\") "
Dec 11 08:40:38 crc kubenswrapper[4881]: I1211 08:40:38.897374 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04df1111-8ddd-4102-9ab8-103767f09410-kube-api-access-v4kw6" (OuterVolumeSpecName: "kube-api-access-v4kw6") pod "04df1111-8ddd-4102-9ab8-103767f09410" (UID: "04df1111-8ddd-4102-9ab8-103767f09410"). InnerVolumeSpecName "kube-api-access-v4kw6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:40:38 crc kubenswrapper[4881]: I1211 08:40:38.996865 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4kw6\" (UniqueName: \"kubernetes.io/projected/04df1111-8ddd-4102-9ab8-103767f09410-kube-api-access-v4kw6\") on node \"crc\" DevicePath \"\""
Dec 11 08:40:39 crc kubenswrapper[4881]: I1211 08:40:39.178765 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c22dd13-8a15-412b-a351-851d91a763ba","Type":"ContainerStarted","Data":"b5ff8cc8738894863ae31d50bce383dc0ccfab6019c4feec2a216b9dad415a4c"}
Dec 11 08:40:39 crc kubenswrapper[4881]: I1211 08:40:39.180884 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-3e52-account-create-82sqk" event={"ID":"04df1111-8ddd-4102-9ab8-103767f09410","Type":"ContainerDied","Data":"97a39810db342c485a6f0668710f0ed9ead3b257891d4c77001e77c3b26f2ac8"}
Dec 11 08:40:39 crc kubenswrapper[4881]: I1211 08:40:39.180930 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97a39810db342c485a6f0668710f0ed9ead3b257891d4c77001e77c3b26f2ac8"
Dec 11 08:40:39 crc kubenswrapper[4881]: I1211 08:40:39.180957 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-3e52-account-create-82sqk"
Dec 11 08:40:41 crc kubenswrapper[4881]: I1211 08:40:41.205998 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c22dd13-8a15-412b-a351-851d91a763ba","Type":"ContainerStarted","Data":"526b94cabc1ab5f42375c717f3f78a7d65fcb4c9648f3d8d19d27530ff115210"}
Dec 11 08:40:41 crc kubenswrapper[4881]: I1211 08:40:41.206601 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Dec 11 08:40:41 crc kubenswrapper[4881]: I1211 08:40:41.233565 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.94914788 podStartE2EDuration="15.233547783s" podCreationTimestamp="2025-12-11 08:40:26 +0000 UTC" firstStartedPulling="2025-12-11 08:40:26.918586033 +0000 UTC m=+1475.295954730" lastFinishedPulling="2025-12-11 08:40:40.202985926 +0000 UTC m=+1488.580354633" observedRunningTime="2025-12-11 08:40:41.222895104 +0000 UTC m=+1489.600263821" watchObservedRunningTime="2025-12-11 08:40:41.233547783 +0000 UTC m=+1489.610916480"
Dec 11 08:40:42 crc kubenswrapper[4881]: I1211 08:40:42.051176 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vk695"]
Dec 11 08:40:42 crc kubenswrapper[4881]: E1211 08:40:42.052352 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04df1111-8ddd-4102-9ab8-103767f09410" containerName="mariadb-account-create"
Dec 11 08:40:42 crc kubenswrapper[4881]: I1211 08:40:42.052455 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="04df1111-8ddd-4102-9ab8-103767f09410" containerName="mariadb-account-create"
Dec 11 08:40:42 crc kubenswrapper[4881]: I1211 08:40:42.052838 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="04df1111-8ddd-4102-9ab8-103767f09410" containerName="mariadb-account-create"
Dec 11 08:40:42 crc kubenswrapper[4881]: I1211 08:40:42.055150 4881 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/redhat-operators-vk695" Dec 11 08:40:42 crc kubenswrapper[4881]: I1211 08:40:42.075462 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vk695"] Dec 11 08:40:42 crc kubenswrapper[4881]: I1211 08:40:42.176175 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/324526c9-d17c-4dfd-b2f7-b86e9577c36c-catalog-content\") pod \"redhat-operators-vk695\" (UID: \"324526c9-d17c-4dfd-b2f7-b86e9577c36c\") " pod="openshift-marketplace/redhat-operators-vk695" Dec 11 08:40:42 crc kubenswrapper[4881]: I1211 08:40:42.176360 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/324526c9-d17c-4dfd-b2f7-b86e9577c36c-utilities\") pod \"redhat-operators-vk695\" (UID: \"324526c9-d17c-4dfd-b2f7-b86e9577c36c\") " pod="openshift-marketplace/redhat-operators-vk695" Dec 11 08:40:42 crc kubenswrapper[4881]: I1211 08:40:42.176662 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8k7d\" (UniqueName: \"kubernetes.io/projected/324526c9-d17c-4dfd-b2f7-b86e9577c36c-kube-api-access-p8k7d\") pod \"redhat-operators-vk695\" (UID: \"324526c9-d17c-4dfd-b2f7-b86e9577c36c\") " pod="openshift-marketplace/redhat-operators-vk695" Dec 11 08:40:42 crc kubenswrapper[4881]: I1211 08:40:42.278685 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/324526c9-d17c-4dfd-b2f7-b86e9577c36c-catalog-content\") pod \"redhat-operators-vk695\" (UID: \"324526c9-d17c-4dfd-b2f7-b86e9577c36c\") " pod="openshift-marketplace/redhat-operators-vk695" Dec 11 08:40:42 crc kubenswrapper[4881]: I1211 08:40:42.278781 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/324526c9-d17c-4dfd-b2f7-b86e9577c36c-utilities\") pod \"redhat-operators-vk695\" (UID: \"324526c9-d17c-4dfd-b2f7-b86e9577c36c\") " pod="openshift-marketplace/redhat-operators-vk695" Dec 11 08:40:42 crc kubenswrapper[4881]: I1211 08:40:42.278823 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8k7d\" (UniqueName: \"kubernetes.io/projected/324526c9-d17c-4dfd-b2f7-b86e9577c36c-kube-api-access-p8k7d\") pod \"redhat-operators-vk695\" (UID: \"324526c9-d17c-4dfd-b2f7-b86e9577c36c\") " pod="openshift-marketplace/redhat-operators-vk695" Dec 11 08:40:42 crc kubenswrapper[4881]: I1211 08:40:42.279192 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/324526c9-d17c-4dfd-b2f7-b86e9577c36c-catalog-content\") pod \"redhat-operators-vk695\" (UID: \"324526c9-d17c-4dfd-b2f7-b86e9577c36c\") " pod="openshift-marketplace/redhat-operators-vk695" Dec 11 08:40:42 crc kubenswrapper[4881]: I1211 08:40:42.279503 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/324526c9-d17c-4dfd-b2f7-b86e9577c36c-utilities\") pod \"redhat-operators-vk695\" (UID: \"324526c9-d17c-4dfd-b2f7-b86e9577c36c\") " pod="openshift-marketplace/redhat-operators-vk695" Dec 11 08:40:42 crc kubenswrapper[4881]: I1211 08:40:42.303239 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-p8k7d\" (UniqueName: \"kubernetes.io/projected/324526c9-d17c-4dfd-b2f7-b86e9577c36c-kube-api-access-p8k7d\") pod \"redhat-operators-vk695\" (UID: \"324526c9-d17c-4dfd-b2f7-b86e9577c36c\") " pod="openshift-marketplace/redhat-operators-vk695" Dec 11 08:40:42 crc kubenswrapper[4881]: I1211 08:40:42.449619 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vk695" Dec 11 08:40:42 crc kubenswrapper[4881]: I1211 08:40:42.952986 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vk695"] Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.076319 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-tt4br"] Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.087173 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-tt4br" Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.091570 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.091790 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.091963 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-gqtm2" Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.097136 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-tt4br"] Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.201145 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-scripts\") pod \"aodh-db-sync-tt4br\" (UID: \"67c4b27a-5352-4006-9944-6de8dc05d3d1\") " pod="openstack/aodh-db-sync-tt4br" Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.201303 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-config-data\") pod \"aodh-db-sync-tt4br\" (UID: \"67c4b27a-5352-4006-9944-6de8dc05d3d1\") " pod="openstack/aodh-db-sync-tt4br" Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.201355 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-combined-ca-bundle\") pod \"aodh-db-sync-tt4br\" (UID: \"67c4b27a-5352-4006-9944-6de8dc05d3d1\") " pod="openstack/aodh-db-sync-tt4br" Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.201452 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qgbh\" (UniqueName: \"kubernetes.io/projected/67c4b27a-5352-4006-9944-6de8dc05d3d1-kube-api-access-6qgbh\") pod \"aodh-db-sync-tt4br\" (UID: \"67c4b27a-5352-4006-9944-6de8dc05d3d1\") " pod="openstack/aodh-db-sync-tt4br" Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.235052 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vk695" event={"ID":"324526c9-d17c-4dfd-b2f7-b86e9577c36c","Type":"ContainerStarted","Data":"1257b08e4188c1026c60fa90aa8dfd41d01ae08ea1530a7ae64a6c10d85fc1d4"} Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.303512 4881 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-scripts\") pod \"aodh-db-sync-tt4br\" (UID: \"67c4b27a-5352-4006-9944-6de8dc05d3d1\") " pod="openstack/aodh-db-sync-tt4br" Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.303639 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-config-data\") pod \"aodh-db-sync-tt4br\" (UID: \"67c4b27a-5352-4006-9944-6de8dc05d3d1\") " pod="openstack/aodh-db-sync-tt4br" Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.303665 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-combined-ca-bundle\") pod \"aodh-db-sync-tt4br\" (UID: \"67c4b27a-5352-4006-9944-6de8dc05d3d1\") " pod="openstack/aodh-db-sync-tt4br" Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.303741 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qgbh\" (UniqueName: \"kubernetes.io/projected/67c4b27a-5352-4006-9944-6de8dc05d3d1-kube-api-access-6qgbh\") pod \"aodh-db-sync-tt4br\" (UID: \"67c4b27a-5352-4006-9944-6de8dc05d3d1\") " pod="openstack/aodh-db-sync-tt4br" Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.310825 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-combined-ca-bundle\") pod \"aodh-db-sync-tt4br\" (UID: \"67c4b27a-5352-4006-9944-6de8dc05d3d1\") " pod="openstack/aodh-db-sync-tt4br" Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.311112 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-config-data\") pod \"aodh-db-sync-tt4br\" (UID: \"67c4b27a-5352-4006-9944-6de8dc05d3d1\") " pod="openstack/aodh-db-sync-tt4br" Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.316260 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-scripts\") pod \"aodh-db-sync-tt4br\" (UID: \"67c4b27a-5352-4006-9944-6de8dc05d3d1\") " pod="openstack/aodh-db-sync-tt4br" Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.326912 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qgbh\" (UniqueName: \"kubernetes.io/projected/67c4b27a-5352-4006-9944-6de8dc05d3d1-kube-api-access-6qgbh\") pod \"aodh-db-sync-tt4br\" (UID: \"67c4b27a-5352-4006-9944-6de8dc05d3d1\") " pod="openstack/aodh-db-sync-tt4br" Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.433179 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-tt4br" Dec 11 08:40:43 crc kubenswrapper[4881]: I1211 08:40:43.926033 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-tt4br"] Dec 11 08:40:43 crc kubenswrapper[4881]: W1211 08:40:43.940149 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod67c4b27a_5352_4006_9944_6de8dc05d3d1.slice/crio-393e637041ef805584aca75634d58da67bd74f25246ad16b2040a7c305d3ce35 WatchSource:0}: Error finding container 393e637041ef805584aca75634d58da67bd74f25246ad16b2040a7c305d3ce35: Status 404 returned error can't find the container with id 393e637041ef805584aca75634d58da67bd74f25246ad16b2040a7c305d3ce35 Dec 11 08:40:44 crc kubenswrapper[4881]: I1211 08:40:44.263939 4881 generic.go:334] "Generic (PLEG): container finished" podID="87e2a6fa-8d80-4bde-9f33-fe0d4f34d933" containerID="4aad9c5989f63736bb1737df392cb932ad9795365978ce16a585b9700510a0fa" exitCode=0 Dec 11 08:40:44 crc kubenswrapper[4881]: I1211 08:40:44.264029 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6jtqx" event={"ID":"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933","Type":"ContainerDied","Data":"4aad9c5989f63736bb1737df392cb932ad9795365978ce16a585b9700510a0fa"} Dec 11 08:40:44 crc kubenswrapper[4881]: I1211 08:40:44.265754 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-tt4br" event={"ID":"67c4b27a-5352-4006-9944-6de8dc05d3d1","Type":"ContainerStarted","Data":"393e637041ef805584aca75634d58da67bd74f25246ad16b2040a7c305d3ce35"} Dec 11 08:40:45 crc kubenswrapper[4881]: I1211 08:40:45.716994 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6jtqx" Dec 11 08:40:45 crc kubenswrapper[4881]: I1211 08:40:45.860913 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8q7f\" (UniqueName: \"kubernetes.io/projected/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-kube-api-access-l8q7f\") pod \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\" (UID: \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\") " Dec 11 08:40:45 crc kubenswrapper[4881]: I1211 08:40:45.861022 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-scripts\") pod \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\" (UID: \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\") " Dec 11 08:40:45 crc kubenswrapper[4881]: I1211 08:40:45.861114 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-combined-ca-bundle\") pod \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\" (UID: \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\") " Dec 11 08:40:45 crc kubenswrapper[4881]: I1211 08:40:45.861264 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-config-data\") pod \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\" (UID: \"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933\") " Dec 11 08:40:45 crc kubenswrapper[4881]: I1211 08:40:45.868629 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-scripts" (OuterVolumeSpecName: "scripts") pod "87e2a6fa-8d80-4bde-9f33-fe0d4f34d933" (UID: 
"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:40:45 crc kubenswrapper[4881]: I1211 08:40:45.873698 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-kube-api-access-l8q7f" (OuterVolumeSpecName: "kube-api-access-l8q7f") pod "87e2a6fa-8d80-4bde-9f33-fe0d4f34d933" (UID: "87e2a6fa-8d80-4bde-9f33-fe0d4f34d933"). InnerVolumeSpecName "kube-api-access-l8q7f". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:40:45 crc kubenswrapper[4881]: I1211 08:40:45.907942 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-config-data" (OuterVolumeSpecName: "config-data") pod "87e2a6fa-8d80-4bde-9f33-fe0d4f34d933" (UID: "87e2a6fa-8d80-4bde-9f33-fe0d4f34d933"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:40:45 crc kubenswrapper[4881]: I1211 08:40:45.926430 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "87e2a6fa-8d80-4bde-9f33-fe0d4f34d933" (UID: "87e2a6fa-8d80-4bde-9f33-fe0d4f34d933"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:40:45 crc kubenswrapper[4881]: I1211 08:40:45.963954 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8q7f\" (UniqueName: \"kubernetes.io/projected/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-kube-api-access-l8q7f\") on node \"crc\" DevicePath \"\"" Dec 11 08:40:45 crc kubenswrapper[4881]: I1211 08:40:45.963993 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:40:45 crc kubenswrapper[4881]: I1211 08:40:45.964006 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:40:45 crc kubenswrapper[4881]: I1211 08:40:45.964016 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:40:46 crc kubenswrapper[4881]: I1211 08:40:46.295817 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6jtqx" event={"ID":"87e2a6fa-8d80-4bde-9f33-fe0d4f34d933","Type":"ContainerDied","Data":"88da7ccb69135e338fb6b7a9d3308c7bebe9a6add0253752440521396a2b6326"} Dec 11 08:40:46 crc kubenswrapper[4881]: I1211 08:40:46.295862 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="88da7ccb69135e338fb6b7a9d3308c7bebe9a6add0253752440521396a2b6326" Dec 11 08:40:46 crc kubenswrapper[4881]: I1211 08:40:46.295914 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6jtqx" Dec 11 08:40:52 crc kubenswrapper[4881]: I1211 08:40:52.742781 4881 trace.go:236] Trace[1560495968]: "Calculate volume metrics of logging-loki-ca-bundle for pod openshift-logging/logging-loki-querier-5895d59bb8-87hfb" (11-Dec-2025 08:40:44.923) (total time: 7819ms): Dec 11 08:40:52 crc kubenswrapper[4881]: Trace[1560495968]: [7.81965375s] [7.81965375s] END Dec 11 08:40:53 crc kubenswrapper[4881]: I1211 08:40:53.450644 4881 generic.go:334] "Generic (PLEG): container finished" podID="324526c9-d17c-4dfd-b2f7-b86e9577c36c" containerID="c90ce7cd8fcf126d46d59245f06190daa1830091da52fd5b1af057889d7e2442" exitCode=0 Dec 11 08:40:53 crc kubenswrapper[4881]: I1211 08:40:53.451055 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vk695" event={"ID":"324526c9-d17c-4dfd-b2f7-b86e9577c36c","Type":"ContainerDied","Data":"c90ce7cd8fcf126d46d59245f06190daa1830091da52fd5b1af057889d7e2442"} Dec 11 08:40:53 crc kubenswrapper[4881]: I1211 08:40:53.972967 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 11 08:40:53 crc kubenswrapper[4881]: E1211 08:40:53.975086 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87e2a6fa-8d80-4bde-9f33-fe0d4f34d933" containerName="nova-cell0-conductor-db-sync" Dec 11 08:40:53 crc kubenswrapper[4881]: I1211 08:40:53.975457 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="87e2a6fa-8d80-4bde-9f33-fe0d4f34d933" containerName="nova-cell0-conductor-db-sync" Dec 11 08:40:53 crc kubenswrapper[4881]: I1211 08:40:53.976009 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="87e2a6fa-8d80-4bde-9f33-fe0d4f34d933" containerName="nova-cell0-conductor-db-sync" Dec 11 08:40:53 crc kubenswrapper[4881]: I1211 08:40:53.977514 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 11 08:40:53 crc kubenswrapper[4881]: I1211 08:40:53.981199 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Dec 11 08:40:53 crc kubenswrapper[4881]: I1211 08:40:53.981893 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-vzncb" Dec 11 08:40:53 crc kubenswrapper[4881]: I1211 08:40:53.988250 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 11 08:40:54 crc kubenswrapper[4881]: I1211 08:40:54.126321 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5bbd6724-7a9f-4aac-8ca7-199f8cba6223-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"5bbd6724-7a9f-4aac-8ca7-199f8cba6223\") " pod="openstack/nova-cell0-conductor-0" Dec 11 08:40:54 crc kubenswrapper[4881]: I1211 08:40:54.126694 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9cg9\" (UniqueName: \"kubernetes.io/projected/5bbd6724-7a9f-4aac-8ca7-199f8cba6223-kube-api-access-g9cg9\") pod \"nova-cell0-conductor-0\" (UID: \"5bbd6724-7a9f-4aac-8ca7-199f8cba6223\") " pod="openstack/nova-cell0-conductor-0" Dec 11 08:40:54 crc kubenswrapper[4881]: I1211 08:40:54.127007 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bbd6724-7a9f-4aac-8ca7-199f8cba6223-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"5bbd6724-7a9f-4aac-8ca7-199f8cba6223\") " pod="openstack/nova-cell0-conductor-0" Dec 11 08:40:54 crc kubenswrapper[4881]: I1211 08:40:54.229797 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bbd6724-7a9f-4aac-8ca7-199f8cba6223-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"5bbd6724-7a9f-4aac-8ca7-199f8cba6223\") " pod="openstack/nova-cell0-conductor-0" Dec 11 08:40:54 crc kubenswrapper[4881]: I1211 08:40:54.230138 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5bbd6724-7a9f-4aac-8ca7-199f8cba6223-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"5bbd6724-7a9f-4aac-8ca7-199f8cba6223\") " pod="openstack/nova-cell0-conductor-0" Dec 11 08:40:54 crc kubenswrapper[4881]: I1211 08:40:54.230172 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9cg9\" (UniqueName: \"kubernetes.io/projected/5bbd6724-7a9f-4aac-8ca7-199f8cba6223-kube-api-access-g9cg9\") pod \"nova-cell0-conductor-0\" (UID: \"5bbd6724-7a9f-4aac-8ca7-199f8cba6223\") " pod="openstack/nova-cell0-conductor-0" Dec 11 08:40:54 crc kubenswrapper[4881]: I1211 08:40:54.240859 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bbd6724-7a9f-4aac-8ca7-199f8cba6223-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"5bbd6724-7a9f-4aac-8ca7-199f8cba6223\") " pod="openstack/nova-cell0-conductor-0" Dec 11 08:40:54 crc kubenswrapper[4881]: I1211 08:40:54.243975 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5bbd6724-7a9f-4aac-8ca7-199f8cba6223-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"5bbd6724-7a9f-4aac-8ca7-199f8cba6223\") " pod="openstack/nova-cell0-conductor-0" Dec 11 08:40:54 crc kubenswrapper[4881]: I1211 08:40:54.263692 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9cg9\" (UniqueName: \"kubernetes.io/projected/5bbd6724-7a9f-4aac-8ca7-199f8cba6223-kube-api-access-g9cg9\") pod \"nova-cell0-conductor-0\" (UID: \"5bbd6724-7a9f-4aac-8ca7-199f8cba6223\") " pod="openstack/nova-cell0-conductor-0" Dec 11 08:40:54 crc kubenswrapper[4881]: I1211 08:40:54.307890 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 11 08:40:55 crc kubenswrapper[4881]: I1211 08:40:55.064226 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 11 08:40:55 crc kubenswrapper[4881]: W1211 08:40:55.076632 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5bbd6724_7a9f_4aac_8ca7_199f8cba6223.slice/crio-46aa157a0368b9bd489f26768a7f3dc041b316f106baa7acfc209b99ee12bbc0 WatchSource:0}: Error finding container 46aa157a0368b9bd489f26768a7f3dc041b316f106baa7acfc209b99ee12bbc0: Status 404 returned error can't find the container with id 46aa157a0368b9bd489f26768a7f3dc041b316f106baa7acfc209b99ee12bbc0 Dec 11 08:40:55 crc kubenswrapper[4881]: I1211 08:40:55.521680 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vk695" event={"ID":"324526c9-d17c-4dfd-b2f7-b86e9577c36c","Type":"ContainerStarted","Data":"a957d9ac23a8d9794d75049cfcd6cd927636c635e317f85cbdb77490a1f2eb9b"} Dec 11 08:40:55 crc kubenswrapper[4881]: I1211 08:40:55.524715 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"5bbd6724-7a9f-4aac-8ca7-199f8cba6223","Type":"ContainerStarted","Data":"46aa157a0368b9bd489f26768a7f3dc041b316f106baa7acfc209b99ee12bbc0"} Dec 11 08:40:56 crc kubenswrapper[4881]: E1211 08:40:56.116123 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-c90ce7cd8fcf126d46d59245f06190daa1830091da52fd5b1af057889d7e2442.scope\": RecentStats: unable to find data in memory cache]" Dec 11 08:40:56 crc kubenswrapper[4881]: I1211 08:40:56.455734 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 11 08:40:56 crc kubenswrapper[4881]: I1211 08:40:56.539958 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"5bbd6724-7a9f-4aac-8ca7-199f8cba6223","Type":"ContainerStarted","Data":"83735a99237eb2b4dc979c5304e051c3110d8c7c212ad16d6b48816f760e0a37"} Dec 11 08:40:56 crc kubenswrapper[4881]: I1211 08:40:56.572977 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=3.572957155 podStartE2EDuration="3.572957155s" podCreationTimestamp="2025-12-11 08:40:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:40:56.557113482 +0000 UTC m=+1504.934482179" watchObservedRunningTime="2025-12-11 08:40:56.572957155 +0000 UTC m=+1504.950325852" Dec 11 08:40:57 crc kubenswrapper[4881]: I1211 08:40:57.552978 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openstack/nova-cell0-conductor-0" Dec 11 08:40:58 crc kubenswrapper[4881]: I1211 08:40:58.283889 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-shvrh"] Dec 11 08:40:58 crc kubenswrapper[4881]: I1211 08:40:58.286374 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-shvrh" Dec 11 08:40:58 crc kubenswrapper[4881]: I1211 08:40:58.337413 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-shvrh"] Dec 11 08:40:58 crc kubenswrapper[4881]: I1211 08:40:58.466273 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vt5s\" (UniqueName: \"kubernetes.io/projected/755c1a62-7945-4edf-9e40-90d8abfea7bd-kube-api-access-4vt5s\") pod \"redhat-marketplace-shvrh\" (UID: \"755c1a62-7945-4edf-9e40-90d8abfea7bd\") " pod="openshift-marketplace/redhat-marketplace-shvrh" Dec 11 08:40:58 crc kubenswrapper[4881]: I1211 08:40:58.466501 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/755c1a62-7945-4edf-9e40-90d8abfea7bd-utilities\") pod \"redhat-marketplace-shvrh\" (UID: \"755c1a62-7945-4edf-9e40-90d8abfea7bd\") " pod="openshift-marketplace/redhat-marketplace-shvrh" Dec 11 08:40:58 crc kubenswrapper[4881]: I1211 08:40:58.466658 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/755c1a62-7945-4edf-9e40-90d8abfea7bd-catalog-content\") pod \"redhat-marketplace-shvrh\" (UID: \"755c1a62-7945-4edf-9e40-90d8abfea7bd\") " pod="openshift-marketplace/redhat-marketplace-shvrh" Dec 11 08:40:58 crc kubenswrapper[4881]: I1211 08:40:58.573076 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/755c1a62-7945-4edf-9e40-90d8abfea7bd-catalog-content\") pod \"redhat-marketplace-shvrh\" (UID: \"755c1a62-7945-4edf-9e40-90d8abfea7bd\") " pod="openshift-marketplace/redhat-marketplace-shvrh" Dec 11 08:40:58 crc kubenswrapper[4881]: I1211 08:40:58.573441 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vt5s\" (UniqueName: \"kubernetes.io/projected/755c1a62-7945-4edf-9e40-90d8abfea7bd-kube-api-access-4vt5s\") pod \"redhat-marketplace-shvrh\" (UID: \"755c1a62-7945-4edf-9e40-90d8abfea7bd\") " pod="openshift-marketplace/redhat-marketplace-shvrh" Dec 11 08:40:58 crc kubenswrapper[4881]: I1211 08:40:58.573656 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/755c1a62-7945-4edf-9e40-90d8abfea7bd-utilities\") pod \"redhat-marketplace-shvrh\" (UID: \"755c1a62-7945-4edf-9e40-90d8abfea7bd\") " pod="openshift-marketplace/redhat-marketplace-shvrh" Dec 11 08:40:59 crc kubenswrapper[4881]: I1211 08:40:59.934477 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/755c1a62-7945-4edf-9e40-90d8abfea7bd-catalog-content\") pod \"redhat-marketplace-shvrh\" (UID: \"755c1a62-7945-4edf-9e40-90d8abfea7bd\") " pod="openshift-marketplace/redhat-marketplace-shvrh" Dec 11 08:40:59 crc kubenswrapper[4881]: I1211 08:40:59.937539 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/755c1a62-7945-4edf-9e40-90d8abfea7bd-utilities\") pod \"redhat-marketplace-shvrh\" (UID: \"755c1a62-7945-4edf-9e40-90d8abfea7bd\") " pod="openshift-marketplace/redhat-marketplace-shvrh" Dec 11 08:40:59 crc kubenswrapper[4881]: I1211 08:40:59.951459 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vt5s\" (UniqueName: \"kubernetes.io/projected/755c1a62-7945-4edf-9e40-90d8abfea7bd-kube-api-access-4vt5s\") pod \"redhat-marketplace-shvrh\" (UID: \"755c1a62-7945-4edf-9e40-90d8abfea7bd\") " pod="openshift-marketplace/redhat-marketplace-shvrh" Dec 11 08:41:00 crc kubenswrapper[4881]: I1211 08:41:00.235257 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-shvrh" Dec 11 08:41:01 crc kubenswrapper[4881]: I1211 08:41:01.798179 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 11 08:41:01 crc kubenswrapper[4881]: I1211 08:41:01.798405 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="558f097f-277d-4824-bafc-28c4c0f139f3" containerName="kube-state-metrics" containerID="cri-o://6f5ccd333037555a031831817d11ecc7d4a25ca7cf05d3819deda856ae8cdce8" gracePeriod=30 Dec 11 08:41:01 crc kubenswrapper[4881]: I1211 08:41:01.935927 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 11 08:41:01 crc kubenswrapper[4881]: I1211 08:41:01.936435 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mysqld-exporter-0" podUID="3dca57af-8220-449b-a5fc-8001bcc8c180" containerName="mysqld-exporter" containerID="cri-o://040473c11cc342fb095cdc5e6d4af3dd423c70da54ff938477dab4b9b33715a3" gracePeriod=30 Dec 11 08:41:02 crc kubenswrapper[4881]: E1211 08:41:02.559963 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-c90ce7cd8fcf126d46d59245f06190daa1830091da52fd5b1af057889d7e2442.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod558f097f_277d_4824_bafc_28c4c0f139f3.slice/crio-conmon-6f5ccd333037555a031831817d11ecc7d4a25ca7cf05d3819deda856ae8cdce8.scope\": RecentStats: unable to find data in memory cache]" Dec 11 08:41:02 crc kubenswrapper[4881]: I1211 08:41:02.607999 4881 generic.go:334] "Generic (PLEG): container finished" podID="3dca57af-8220-449b-a5fc-8001bcc8c180" containerID="040473c11cc342fb095cdc5e6d4af3dd423c70da54ff938477dab4b9b33715a3" exitCode=2 Dec 11 08:41:02 crc kubenswrapper[4881]: I1211 08:41:02.608130 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"3dca57af-8220-449b-a5fc-8001bcc8c180","Type":"ContainerDied","Data":"040473c11cc342fb095cdc5e6d4af3dd423c70da54ff938477dab4b9b33715a3"} Dec 11 08:41:02 crc kubenswrapper[4881]: I1211 08:41:02.611526 4881 generic.go:334] "Generic (PLEG): container finished" podID="558f097f-277d-4824-bafc-28c4c0f139f3" containerID="6f5ccd333037555a031831817d11ecc7d4a25ca7cf05d3819deda856ae8cdce8" exitCode=2 Dec 11 08:41:02 crc kubenswrapper[4881]: I1211 08:41:02.611556 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" 
event={"ID":"558f097f-277d-4824-bafc-28c4c0f139f3","Type":"ContainerDied","Data":"6f5ccd333037555a031831817d11ecc7d4a25ca7cf05d3819deda856ae8cdce8"} Dec 11 08:41:03 crc kubenswrapper[4881]: I1211 08:41:03.947301 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:41:03 crc kubenswrapper[4881]: I1211 08:41:03.947846 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7c22dd13-8a15-412b-a351-851d91a763ba" containerName="ceilometer-central-agent" containerID="cri-o://16efdf8e4070fcffa5289512b3f3b3d42643ad9087c9ee2d8117b428fe2aa04f" gracePeriod=30 Dec 11 08:41:03 crc kubenswrapper[4881]: I1211 08:41:03.948349 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7c22dd13-8a15-412b-a351-851d91a763ba" containerName="proxy-httpd" containerID="cri-o://526b94cabc1ab5f42375c717f3f78a7d65fcb4c9648f3d8d19d27530ff115210" gracePeriod=30 Dec 11 08:41:03 crc kubenswrapper[4881]: I1211 08:41:03.948406 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7c22dd13-8a15-412b-a351-851d91a763ba" containerName="sg-core" containerID="cri-o://b5ff8cc8738894863ae31d50bce383dc0ccfab6019c4feec2a216b9dad415a4c" gracePeriod=30 Dec 11 08:41:03 crc kubenswrapper[4881]: I1211 08:41:03.948449 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7c22dd13-8a15-412b-a351-851d91a763ba" containerName="ceilometer-notification-agent" containerID="cri-o://2a3a1064d42f444ae15ac25f5dbf69b384a98da7dd40d5787d27be2c49036beb" gracePeriod=30 Dec 11 08:41:04 crc kubenswrapper[4881]: I1211 08:41:04.357458 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Dec 11 08:41:04 crc kubenswrapper[4881]: I1211 08:41:04.636456 4881 generic.go:334] "Generic (PLEG): container finished" podID="7c22dd13-8a15-412b-a351-851d91a763ba" containerID="526b94cabc1ab5f42375c717f3f78a7d65fcb4c9648f3d8d19d27530ff115210" exitCode=0 Dec 11 08:41:04 crc kubenswrapper[4881]: I1211 08:41:04.636947 4881 generic.go:334] "Generic (PLEG): container finished" podID="7c22dd13-8a15-412b-a351-851d91a763ba" containerID="b5ff8cc8738894863ae31d50bce383dc0ccfab6019c4feec2a216b9dad415a4c" exitCode=2 Dec 11 08:41:04 crc kubenswrapper[4881]: I1211 08:41:04.636524 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c22dd13-8a15-412b-a351-851d91a763ba","Type":"ContainerDied","Data":"526b94cabc1ab5f42375c717f3f78a7d65fcb4c9648f3d8d19d27530ff115210"} Dec 11 08:41:04 crc kubenswrapper[4881]: I1211 08:41:04.637090 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c22dd13-8a15-412b-a351-851d91a763ba","Type":"ContainerDied","Data":"b5ff8cc8738894863ae31d50bce383dc0ccfab6019c4feec2a216b9dad415a4c"} Dec 11 08:41:04 crc kubenswrapper[4881]: I1211 08:41:04.985012 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-4h7jd"] Dec 11 08:41:04 crc kubenswrapper[4881]: I1211 08:41:04.986964 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-4h7jd" Dec 11 08:41:04 crc kubenswrapper[4881]: I1211 08:41:04.988905 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Dec 11 08:41:04 crc kubenswrapper[4881]: I1211 08:41:04.989751 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.048191 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-4h7jd"] Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.059141 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-4h7jd\" (UID: \"b6d5fc48-a707-4ab1-a8f9-392295486185\") " pod="openstack/nova-cell0-cell-mapping-4h7jd" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.059227 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-scripts\") pod \"nova-cell0-cell-mapping-4h7jd\" (UID: \"b6d5fc48-a707-4ab1-a8f9-392295486185\") " pod="openstack/nova-cell0-cell-mapping-4h7jd" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.059270 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfq26\" (UniqueName: \"kubernetes.io/projected/b6d5fc48-a707-4ab1-a8f9-392295486185-kube-api-access-bfq26\") pod \"nova-cell0-cell-mapping-4h7jd\" (UID: \"b6d5fc48-a707-4ab1-a8f9-392295486185\") " pod="openstack/nova-cell0-cell-mapping-4h7jd" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.059406 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-config-data\") pod \"nova-cell0-cell-mapping-4h7jd\" (UID: \"b6d5fc48-a707-4ab1-a8f9-392295486185\") " pod="openstack/nova-cell0-cell-mapping-4h7jd" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.161257 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-scripts\") pod \"nova-cell0-cell-mapping-4h7jd\" (UID: \"b6d5fc48-a707-4ab1-a8f9-392295486185\") " pod="openstack/nova-cell0-cell-mapping-4h7jd" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.161317 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfq26\" (UniqueName: \"kubernetes.io/projected/b6d5fc48-a707-4ab1-a8f9-392295486185-kube-api-access-bfq26\") pod \"nova-cell0-cell-mapping-4h7jd\" (UID: \"b6d5fc48-a707-4ab1-a8f9-392295486185\") " pod="openstack/nova-cell0-cell-mapping-4h7jd" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.161435 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-config-data\") pod \"nova-cell0-cell-mapping-4h7jd\" (UID: \"b6d5fc48-a707-4ab1-a8f9-392295486185\") " pod="openstack/nova-cell0-cell-mapping-4h7jd" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.161632 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-4h7jd\" (UID: \"b6d5fc48-a707-4ab1-a8f9-392295486185\") " pod="openstack/nova-cell0-cell-mapping-4h7jd" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.167220 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-scripts\") pod \"nova-cell0-cell-mapping-4h7jd\" (UID: \"b6d5fc48-a707-4ab1-a8f9-392295486185\") " pod="openstack/nova-cell0-cell-mapping-4h7jd" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.169767 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-4h7jd\" (UID: \"b6d5fc48-a707-4ab1-a8f9-392295486185\") " pod="openstack/nova-cell0-cell-mapping-4h7jd" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.196167 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-config-data\") pod \"nova-cell0-cell-mapping-4h7jd\" (UID: \"b6d5fc48-a707-4ab1-a8f9-392295486185\") " pod="openstack/nova-cell0-cell-mapping-4h7jd" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.208923 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.216911 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.225645 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.231949 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.243146 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.247846 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.269279 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfq26\" (UniqueName: \"kubernetes.io/projected/b6d5fc48-a707-4ab1-a8f9-392295486185-kube-api-access-bfq26\") pod \"nova-cell0-cell-mapping-4h7jd\" (UID: \"b6d5fc48-a707-4ab1-a8f9-392295486185\") " pod="openstack/nova-cell0-cell-mapping-4h7jd" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.271182 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-logs\") pod \"nova-metadata-0\" (UID: \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\") " pod="openstack/nova-metadata-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.271578 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10d7837c-f2c1-475b-a8b2-0885aee68f82-config-data\") pod \"nova-api-0\" (UID: \"10d7837c-f2c1-475b-a8b2-0885aee68f82\") " pod="openstack/nova-api-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.271753 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10d7837c-f2c1-475b-a8b2-0885aee68f82-logs\") pod \"nova-api-0\" (UID: \"10d7837c-f2c1-475b-a8b2-0885aee68f82\") " pod="openstack/nova-api-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.271962 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgrtx\" (UniqueName: \"kubernetes.io/projected/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-kube-api-access-zgrtx\") pod \"nova-metadata-0\" (UID: \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\") " pod="openstack/nova-metadata-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.272063 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10d7837c-f2c1-475b-a8b2-0885aee68f82-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"10d7837c-f2c1-475b-a8b2-0885aee68f82\") " pod="openstack/nova-api-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.272225 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\") " pod="openstack/nova-metadata-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.272306 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-config-data\") pod \"nova-metadata-0\" (UID: \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\") " pod="openstack/nova-metadata-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.272413 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rn94k\" (UniqueName: \"kubernetes.io/projected/10d7837c-f2c1-475b-a8b2-0885aee68f82-kube-api-access-rn94k\") pod \"nova-api-0\" (UID: 
\"10d7837c-f2c1-475b-a8b2-0885aee68f82\") " pod="openstack/nova-api-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.305527 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.327749 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-4h7jd" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.329593 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.374298 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rn94k\" (UniqueName: \"kubernetes.io/projected/10d7837c-f2c1-475b-a8b2-0885aee68f82-kube-api-access-rn94k\") pod \"nova-api-0\" (UID: \"10d7837c-f2c1-475b-a8b2-0885aee68f82\") " pod="openstack/nova-api-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.374384 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-logs\") pod \"nova-metadata-0\" (UID: \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\") " pod="openstack/nova-metadata-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.374409 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10d7837c-f2c1-475b-a8b2-0885aee68f82-config-data\") pod \"nova-api-0\" (UID: \"10d7837c-f2c1-475b-a8b2-0885aee68f82\") " pod="openstack/nova-api-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.374432 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10d7837c-f2c1-475b-a8b2-0885aee68f82-logs\") pod \"nova-api-0\" (UID: \"10d7837c-f2c1-475b-a8b2-0885aee68f82\") " pod="openstack/nova-api-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.374538 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgrtx\" (UniqueName: \"kubernetes.io/projected/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-kube-api-access-zgrtx\") pod \"nova-metadata-0\" (UID: \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\") " pod="openstack/nova-metadata-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.374573 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10d7837c-f2c1-475b-a8b2-0885aee68f82-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"10d7837c-f2c1-475b-a8b2-0885aee68f82\") " pod="openstack/nova-api-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.374663 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\") " pod="openstack/nova-metadata-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.374692 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-config-data\") pod \"nova-metadata-0\" (UID: \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\") " pod="openstack/nova-metadata-0" Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.378766 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 
08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.383960 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.391138 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-logs\") pod \"nova-metadata-0\" (UID: \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\") " pod="openstack/nova-metadata-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.398404 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-config-data\") pod \"nova-metadata-0\" (UID: \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\") " pod="openstack/nova-metadata-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.401947 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10d7837c-f2c1-475b-a8b2-0885aee68f82-logs\") pod \"nova-api-0\" (UID: \"10d7837c-f2c1-475b-a8b2-0885aee68f82\") " pod="openstack/nova-api-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.406722 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.407010 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\") " pod="openstack/nova-metadata-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.411930 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10d7837c-f2c1-475b-a8b2-0885aee68f82-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"10d7837c-f2c1-475b-a8b2-0885aee68f82\") " pod="openstack/nova-api-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.431030 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10d7837c-f2c1-475b-a8b2-0885aee68f82-config-data\") pod \"nova-api-0\" (UID: \"10d7837c-f2c1-475b-a8b2-0885aee68f82\") " pod="openstack/nova-api-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.435670 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rn94k\" (UniqueName: \"kubernetes.io/projected/10d7837c-f2c1-475b-a8b2-0885aee68f82-kube-api-access-rn94k\") pod \"nova-api-0\" (UID: \"10d7837c-f2c1-475b-a8b2-0885aee68f82\") " pod="openstack/nova-api-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.436986 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgrtx\" (UniqueName: \"kubernetes.io/projected/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-kube-api-access-zgrtx\") pod \"nova-metadata-0\" (UID: \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\") " pod="openstack/nova-metadata-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.463601 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.482106 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59s7m\" (UniqueName: \"kubernetes.io/projected/e688f7d9-d215-48aa-8092-e176d3437f09-kube-api-access-59s7m\") pod \"nova-scheduler-0\" (UID: \"e688f7d9-d215-48aa-8092-e176d3437f09\") " pod="openstack/nova-scheduler-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.482544 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e688f7d9-d215-48aa-8092-e176d3437f09-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e688f7d9-d215-48aa-8092-e176d3437f09\") " pod="openstack/nova-scheduler-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.482700 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e688f7d9-d215-48aa-8092-e176d3437f09-config-data\") pod \"nova-scheduler-0\" (UID: \"e688f7d9-d215-48aa-8092-e176d3437f09\") " pod="openstack/nova-scheduler-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.504304 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.553826 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-5xbg9"]
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.557634 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.583031 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-5xbg9"]
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.588672 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rmhf\" (UniqueName: \"kubernetes.io/projected/8a89d31f-1156-4509-85be-1ac98304de6c-kube-api-access-8rmhf\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.590161 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.590202 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59s7m\" (UniqueName: \"kubernetes.io/projected/e688f7d9-d215-48aa-8092-e176d3437f09-kube-api-access-59s7m\") pod \"nova-scheduler-0\" (UID: \"e688f7d9-d215-48aa-8092-e176d3437f09\") " pod="openstack/nova-scheduler-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.590812 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e688f7d9-d215-48aa-8092-e176d3437f09-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e688f7d9-d215-48aa-8092-e176d3437f09\") " pod="openstack/nova-scheduler-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.590878 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.590915 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.590972 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e688f7d9-d215-48aa-8092-e176d3437f09-config-data\") pod \"nova-scheduler-0\" (UID: \"e688f7d9-d215-48aa-8092-e176d3437f09\") " pod="openstack/nova-scheduler-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.591013 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.591035 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-config\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.599078 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e688f7d9-d215-48aa-8092-e176d3437f09-config-data\") pod \"nova-scheduler-0\" (UID: \"e688f7d9-d215-48aa-8092-e176d3437f09\") " pod="openstack/nova-scheduler-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.601667 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e688f7d9-d215-48aa-8092-e176d3437f09-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e688f7d9-d215-48aa-8092-e176d3437f09\") " pod="openstack/nova-scheduler-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.615736 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59s7m\" (UniqueName: \"kubernetes.io/projected/e688f7d9-d215-48aa-8092-e176d3437f09-kube-api-access-59s7m\") pod \"nova-scheduler-0\" (UID: \"e688f7d9-d215-48aa-8092-e176d3437f09\") " pod="openstack/nova-scheduler-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.617323 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.619596 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.624790 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.635837 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.659322 4881 generic.go:334] "Generic (PLEG): container finished" podID="7c22dd13-8a15-412b-a351-851d91a763ba" containerID="16efdf8e4070fcffa5289512b3f3b3d42643ad9087c9ee2d8117b428fe2aa04f" exitCode=0
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.659382 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c22dd13-8a15-412b-a351-851d91a763ba","Type":"ContainerDied","Data":"16efdf8e4070fcffa5289512b3f3b3d42643ad9087c9ee2d8117b428fe2aa04f"}
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.705960 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.706035 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-config\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.706225 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/532f11a0-2d04-48b6-87a8-b27e99195ac9-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"532f11a0-2d04-48b6-87a8-b27e99195ac9\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.706265 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rmhf\" (UniqueName: \"kubernetes.io/projected/8a89d31f-1156-4509-85be-1ac98304de6c-kube-api-access-8rmhf\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.706975 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.708523 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-config\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.713821 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.714027 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-824fx\" (UniqueName: \"kubernetes.io/projected/532f11a0-2d04-48b6-87a8-b27e99195ac9-kube-api-access-824fx\") pod \"nova-cell1-novncproxy-0\" (UID: \"532f11a0-2d04-48b6-87a8-b27e99195ac9\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.714162 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.714192 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/532f11a0-2d04-48b6-87a8-b27e99195ac9-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"532f11a0-2d04-48b6-87a8-b27e99195ac9\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.714228 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.720053 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.720494 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.720566 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.735934 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.736022 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rmhf\" (UniqueName: \"kubernetes.io/projected/8a89d31f-1156-4509-85be-1ac98304de6c-kube-api-access-8rmhf\") pod \"dnsmasq-dns-5fbc4d444f-5xbg9\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.785843 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.822050 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-824fx\" (UniqueName: \"kubernetes.io/projected/532f11a0-2d04-48b6-87a8-b27e99195ac9-kube-api-access-824fx\") pod \"nova-cell1-novncproxy-0\" (UID: \"532f11a0-2d04-48b6-87a8-b27e99195ac9\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.822172 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/532f11a0-2d04-48b6-87a8-b27e99195ac9-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"532f11a0-2d04-48b6-87a8-b27e99195ac9\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.822382 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/532f11a0-2d04-48b6-87a8-b27e99195ac9-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"532f11a0-2d04-48b6-87a8-b27e99195ac9\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.829417 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/532f11a0-2d04-48b6-87a8-b27e99195ac9-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"532f11a0-2d04-48b6-87a8-b27e99195ac9\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.835260 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/532f11a0-2d04-48b6-87a8-b27e99195ac9-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"532f11a0-2d04-48b6-87a8-b27e99195ac9\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.852212 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-824fx\" (UniqueName: \"kubernetes.io/projected/532f11a0-2d04-48b6-87a8-b27e99195ac9-kube-api-access-824fx\") pod \"nova-cell1-novncproxy-0\" (UID: \"532f11a0-2d04-48b6-87a8-b27e99195ac9\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.879807 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9"
Dec 11 08:41:05 crc kubenswrapper[4881]: I1211 08:41:05.950423 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Dec 11 08:41:06 crc kubenswrapper[4881]: E1211 08:41:06.291840 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-c90ce7cd8fcf126d46d59245f06190daa1830091da52fd5b1af057889d7e2442.scope\": RecentStats: unable to find data in memory cache]"
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.431808 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-dgdhn"]
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.433542 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-dgdhn"
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.438515 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.439451 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts"
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.450435 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-dgdhn"]
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.538351 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-config-data\") pod \"nova-cell1-conductor-db-sync-dgdhn\" (UID: \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\") " pod="openstack/nova-cell1-conductor-db-sync-dgdhn"
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.538431 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-dgdhn\" (UID: \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\") " pod="openstack/nova-cell1-conductor-db-sync-dgdhn"
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.538923 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-scripts\") pod \"nova-cell1-conductor-db-sync-dgdhn\" (UID: \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\") " pod="openstack/nova-cell1-conductor-db-sync-dgdhn"
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.538993 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95ht9\" (UniqueName: \"kubernetes.io/projected/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-kube-api-access-95ht9\") pod \"nova-cell1-conductor-db-sync-dgdhn\" (UID: \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\") " pod="openstack/nova-cell1-conductor-db-sync-dgdhn"
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.641467 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-scripts\") pod \"nova-cell1-conductor-db-sync-dgdhn\" (UID: \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\") " pod="openstack/nova-cell1-conductor-db-sync-dgdhn"
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.641539 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95ht9\" (UniqueName: \"kubernetes.io/projected/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-kube-api-access-95ht9\") pod \"nova-cell1-conductor-db-sync-dgdhn\" (UID: \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\") " pod="openstack/nova-cell1-conductor-db-sync-dgdhn"
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.641668 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-config-data\") pod \"nova-cell1-conductor-db-sync-dgdhn\" (UID: \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\") " pod="openstack/nova-cell1-conductor-db-sync-dgdhn"
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.641704 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-dgdhn\" (UID: \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\") " pod="openstack/nova-cell1-conductor-db-sync-dgdhn"
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.648091 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-scripts\") pod \"nova-cell1-conductor-db-sync-dgdhn\" (UID: \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\") " pod="openstack/nova-cell1-conductor-db-sync-dgdhn"
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.651620 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-config-data\") pod \"nova-cell1-conductor-db-sync-dgdhn\" (UID: \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\") " pod="openstack/nova-cell1-conductor-db-sync-dgdhn"
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.652492 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-dgdhn\" (UID: \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\") " pod="openstack/nova-cell1-conductor-db-sync-dgdhn"
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.664994 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95ht9\" (UniqueName: \"kubernetes.io/projected/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-kube-api-access-95ht9\") pod \"nova-cell1-conductor-db-sync-dgdhn\" (UID: \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\") " pod="openstack/nova-cell1-conductor-db-sync-dgdhn"
Dec 11 08:41:06 crc kubenswrapper[4881]: I1211 08:41:06.764906 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-dgdhn"
Dec 11 08:41:08 crc kubenswrapper[4881]: I1211 08:41:08.311643 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="558f097f-277d-4824-bafc-28c4c0f139f3" containerName="kube-state-metrics" probeResult="failure" output="Get \"http://10.217.0.134:8081/readyz\": dial tcp 10.217.0.134:8081: connect: connection refused"
Dec 11 08:41:09 crc kubenswrapper[4881]: I1211 08:41:09.060647 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-controller-manager-5646b5c6f5-clxl4" podUID="fda9a059-2ee6-41ae-ad81-e4f694080990" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8080/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Dec 11 08:41:09 crc kubenswrapper[4881]: I1211 08:41:09.730062 4881 generic.go:334] "Generic (PLEG): container finished" podID="324526c9-d17c-4dfd-b2f7-b86e9577c36c" containerID="a957d9ac23a8d9794d75049cfcd6cd927636c635e317f85cbdb77490a1f2eb9b" exitCode=0
Dec 11 08:41:09 crc kubenswrapper[4881]: I1211 08:41:09.730655 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vk695" event={"ID":"324526c9-d17c-4dfd-b2f7-b86e9577c36c","Type":"ContainerDied","Data":"a957d9ac23a8d9794d75049cfcd6cd927636c635e317f85cbdb77490a1f2eb9b"}
Dec 11 08:41:09 crc kubenswrapper[4881]: I1211 08:41:09.743234 4881 generic.go:334] "Generic (PLEG): container finished" podID="7c22dd13-8a15-412b-a351-851d91a763ba" containerID="2a3a1064d42f444ae15ac25f5dbf69b384a98da7dd40d5787d27be2c49036beb" exitCode=0
Dec 11 08:41:09 crc kubenswrapper[4881]: I1211 08:41:09.743277 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c22dd13-8a15-412b-a351-851d91a763ba","Type":"ContainerDied","Data":"2a3a1064d42f444ae15ac25f5dbf69b384a98da7dd40d5787d27be2c49036beb"}
Dec 11 08:41:09 crc kubenswrapper[4881]: E1211 08:41:09.832484 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified"
Dec 11 08:41:09 crc kubenswrapper[4881]: E1211 08:41:09.832686 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:aodh-db-sync,Image:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:AodhPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:AodhPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:aodh-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6qgbh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod aodh-db-sync-tt4br_openstack(67c4b27a-5352-4006-9944-6de8dc05d3d1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 11 08:41:09 crc kubenswrapper[4881]: E1211 08:41:09.834029 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"aodh-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/aodh-db-sync-tt4br" podUID="67c4b27a-5352-4006-9944-6de8dc05d3d1"
Dec 11 08:41:10 crc kubenswrapper[4881]: I1211 08:41:10.539378 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 11 08:41:10 crc kubenswrapper[4881]: E1211 08:41:10.763510 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"aodh-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified\\\"\"" pod="openstack/aodh-db-sync-tt4br" podUID="67c4b27a-5352-4006-9944-6de8dc05d3d1"
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:10.995497 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-dgdhn"]
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.022559 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-4h7jd"]
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.032039 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-shvrh"]
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.046091 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-5xbg9"]
Dec 11 08:41:11 crc kubenswrapper[4881]: W1211 08:41:11.496749 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode688f7d9_d215_48aa_8092_e176d3437f09.slice/crio-5212e12db9086955469cdaeb40997be0eaac6cab01484238de7fe94120e16dfa WatchSource:0}: Error finding container 5212e12db9086955469cdaeb40997be0eaac6cab01484238de7fe94120e16dfa: Status 404 returned error can't find the container with id 5212e12db9086955469cdaeb40997be0eaac6cab01484238de7fe94120e16dfa
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.499048 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.582104 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Dec 11 08:41:11 crc kubenswrapper[4881]: W1211 08:41:11.670063 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod494c324b_b9ff_46d5_aafd_bcf1d3f72dea.slice/crio-c60e1b0f97154c39c1ecb6d188c232ac10f12f11cc7b64b57981eb4f5b3a1c50 WatchSource:0}: Error finding container c60e1b0f97154c39c1ecb6d188c232ac10f12f11cc7b64b57981eb4f5b3a1c50: Status 404 returned error can't find the container with id c60e1b0f97154c39c1ecb6d188c232ac10f12f11cc7b64b57981eb4f5b3a1c50
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.734457 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.779162 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"532f11a0-2d04-48b6-87a8-b27e99195ac9","Type":"ContainerStarted","Data":"6b27f9a88ba0e9df7605f0240965774967a307b37e86a1c9c2a9d43264bb5042"}
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.781963 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-shvrh" event={"ID":"755c1a62-7945-4edf-9e40-90d8abfea7bd","Type":"ContainerStarted","Data":"c83751848ff1fceb6be62d50ed6cd9e829f50fa38fb0b8dae5eb158042446f37"}
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.784437 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"3dca57af-8220-449b-a5fc-8001bcc8c180","Type":"ContainerDied","Data":"4ccc156b3fed1a049ca01fe859b4b141a69afe251293b4611698658c47764f88"}
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.784477 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ccc156b3fed1a049ca01fe859b4b141a69afe251293b4611698658c47764f88"
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.786193 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"10d7837c-f2c1-475b-a8b2-0885aee68f82","Type":"ContainerStarted","Data":"98a6154b9bd4072738d3ec52504ef7ca1719f91f3f0271fbb63a2b8367cc1208"}
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.788077 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"558f097f-277d-4824-bafc-28c4c0f139f3","Type":"ContainerDied","Data":"be56e0bea3e809c928a527716966f23b675b617e5c19e67a00151d075ae6f19b"}
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.788102 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="be56e0bea3e809c928a527716966f23b675b617e5c19e67a00151d075ae6f19b"
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.788142 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.789485 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"494c324b-b9ff-46d5-aafd-bcf1d3f72dea","Type":"ContainerStarted","Data":"c60e1b0f97154c39c1ecb6d188c232ac10f12f11cc7b64b57981eb4f5b3a1c50"}
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.790551 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-dgdhn" event={"ID":"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc","Type":"ContainerStarted","Data":"f697e3148cabc05aacae526bee51af71f26a21fc511787e9d7e84e3da5b7998c"}
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.793802 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e688f7d9-d215-48aa-8092-e176d3437f09","Type":"ContainerStarted","Data":"5212e12db9086955469cdaeb40997be0eaac6cab01484238de7fe94120e16dfa"}
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.799871 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-4h7jd" event={"ID":"b6d5fc48-a707-4ab1-a8f9-392295486185","Type":"ContainerStarted","Data":"d552b0fb88de7338f5190d2e8fe118117332612147a31819ae23c464e70d6035"}
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.801113 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.801455 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9" event={"ID":"8a89d31f-1156-4509-85be-1ac98304de6c","Type":"ContainerStarted","Data":"424515f5587f34e6aebd1dde992be73912d373f6dfb46ea944f97d3199192c6a"}
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.807979 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c22dd13-8a15-412b-a351-851d91a763ba","Type":"ContainerDied","Data":"1e60923b8a101ca74eb8a0796f23df21a0eaf6ec68c345252198c77716e9cd23"}
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.808034 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1e60923b8a101ca74eb8a0796f23df21a0eaf6ec68c345252198c77716e9cd23"
Dec 11 08:41:11 crc kubenswrapper[4881]: I1211 08:41:11.835375 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.187559 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.219370 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dca57af-8220-449b-a5fc-8001bcc8c180-combined-ca-bundle\") pod \"3dca57af-8220-449b-a5fc-8001bcc8c180\" (UID: \"3dca57af-8220-449b-a5fc-8001bcc8c180\") "
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.219427 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dca57af-8220-449b-a5fc-8001bcc8c180-config-data\") pod \"3dca57af-8220-449b-a5fc-8001bcc8c180\" (UID: \"3dca57af-8220-449b-a5fc-8001bcc8c180\") "
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.219834 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqsg2\" (UniqueName: \"kubernetes.io/projected/3dca57af-8220-449b-a5fc-8001bcc8c180-kube-api-access-mqsg2\") pod \"3dca57af-8220-449b-a5fc-8001bcc8c180\" (UID: \"3dca57af-8220-449b-a5fc-8001bcc8c180\") "
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.232674 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3dca57af-8220-449b-a5fc-8001bcc8c180-kube-api-access-mqsg2" (OuterVolumeSpecName: "kube-api-access-mqsg2") pod "3dca57af-8220-449b-a5fc-8001bcc8c180" (UID: "3dca57af-8220-449b-a5fc-8001bcc8c180"). InnerVolumeSpecName "kube-api-access-mqsg2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.259633 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dca57af-8220-449b-a5fc-8001bcc8c180-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3dca57af-8220-449b-a5fc-8001bcc8c180" (UID: "3dca57af-8220-449b-a5fc-8001bcc8c180"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.294848 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dca57af-8220-449b-a5fc-8001bcc8c180-config-data" (OuterVolumeSpecName: "config-data") pod "3dca57af-8220-449b-a5fc-8001bcc8c180" (UID: "3dca57af-8220-449b-a5fc-8001bcc8c180"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.323940 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqsg2\" (UniqueName: \"kubernetes.io/projected/3dca57af-8220-449b-a5fc-8001bcc8c180-kube-api-access-mqsg2\") on node \"crc\" DevicePath \"\""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.323985 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3dca57af-8220-449b-a5fc-8001bcc8c180-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.323999 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3dca57af-8220-449b-a5fc-8001bcc8c180-config-data\") on node \"crc\" DevicePath \"\""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.366817 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.407662 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.424968 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qc66q\" (UniqueName: \"kubernetes.io/projected/7c22dd13-8a15-412b-a351-851d91a763ba-kube-api-access-qc66q\") pod \"7c22dd13-8a15-412b-a351-851d91a763ba\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") "
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.425173 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-scripts\") pod \"7c22dd13-8a15-412b-a351-851d91a763ba\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") "
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.425268 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-sg-core-conf-yaml\") pod \"7c22dd13-8a15-412b-a351-851d91a763ba\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") "
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.425328 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c22dd13-8a15-412b-a351-851d91a763ba-log-httpd\") pod \"7c22dd13-8a15-412b-a351-851d91a763ba\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") "
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.425404 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c22dd13-8a15-412b-a351-851d91a763ba-run-httpd\") pod \"7c22dd13-8a15-412b-a351-851d91a763ba\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") "
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.425474 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-config-data\") pod \"7c22dd13-8a15-412b-a351-851d91a763ba\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") "
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.425499 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-combined-ca-bundle\") pod \"7c22dd13-8a15-412b-a351-851d91a763ba\" (UID: \"7c22dd13-8a15-412b-a351-851d91a763ba\") "
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.426512 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c22dd13-8a15-412b-a351-851d91a763ba-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7c22dd13-8a15-412b-a351-851d91a763ba" (UID: "7c22dd13-8a15-412b-a351-851d91a763ba"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.426867 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c22dd13-8a15-412b-a351-851d91a763ba-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7c22dd13-8a15-412b-a351-851d91a763ba" (UID: "7c22dd13-8a15-412b-a351-851d91a763ba"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.441306 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-scripts" (OuterVolumeSpecName: "scripts") pod "7c22dd13-8a15-412b-a351-851d91a763ba" (UID: "7c22dd13-8a15-412b-a351-851d91a763ba"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.442456 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c22dd13-8a15-412b-a351-851d91a763ba-kube-api-access-qc66q" (OuterVolumeSpecName: "kube-api-access-qc66q") pod "7c22dd13-8a15-412b-a351-851d91a763ba" (UID: "7c22dd13-8a15-412b-a351-851d91a763ba"). InnerVolumeSpecName "kube-api-access-qc66q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.530176 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8f96\" (UniqueName: \"kubernetes.io/projected/558f097f-277d-4824-bafc-28c4c0f139f3-kube-api-access-h8f96\") pod \"558f097f-277d-4824-bafc-28c4c0f139f3\" (UID: \"558f097f-277d-4824-bafc-28c4c0f139f3\") "
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.531860 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-scripts\") on node \"crc\" DevicePath \"\""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.531890 4881 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c22dd13-8a15-412b-a351-851d91a763ba-log-httpd\") on node \"crc\" DevicePath \"\""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.531902 4881 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c22dd13-8a15-412b-a351-851d91a763ba-run-httpd\") on node \"crc\" DevicePath \"\""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.531913 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qc66q\" (UniqueName: \"kubernetes.io/projected/7c22dd13-8a15-412b-a351-851d91a763ba-kube-api-access-qc66q\") on node \"crc\" DevicePath \"\""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.538797 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/558f097f-277d-4824-bafc-28c4c0f139f3-kube-api-access-h8f96" (OuterVolumeSpecName: "kube-api-access-h8f96") pod "558f097f-277d-4824-bafc-28c4c0f139f3" (UID: "558f097f-277d-4824-bafc-28c4c0f139f3"). InnerVolumeSpecName "kube-api-access-h8f96". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.634584 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8f96\" (UniqueName: \"kubernetes.io/projected/558f097f-277d-4824-bafc-28c4c0f139f3-kube-api-access-h8f96\") on node \"crc\" DevicePath \"\""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.702016 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7c22dd13-8a15-412b-a351-851d91a763ba" (UID: "7c22dd13-8a15-412b-a351-851d91a763ba"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.737690 4881 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.763012 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c22dd13-8a15-412b-a351-851d91a763ba" (UID: "7c22dd13-8a15-412b-a351-851d91a763ba"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.799931 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-config-data" (OuterVolumeSpecName: "config-data") pod "7c22dd13-8a15-412b-a351-851d91a763ba" (UID: "7c22dd13-8a15-412b-a351-851d91a763ba"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.826589 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-dgdhn" event={"ID":"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc","Type":"ContainerStarted","Data":"f97911d88b636654f9534271dbb427322400e2484df7a386c23013f0fa9bfd8a"}
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.834030 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-4h7jd" event={"ID":"b6d5fc48-a707-4ab1-a8f9-392295486185","Type":"ContainerStarted","Data":"59913bd1ac6e438f295e06b92ae8b703b58c3f2270de6ae1db0d0b8aada64c33"}
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.840385 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-config-data\") on node \"crc\" DevicePath \"\""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.840415 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c22dd13-8a15-412b-a351-851d91a763ba-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.841260 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9" event={"ID":"8a89d31f-1156-4509-85be-1ac98304de6c","Type":"ContainerStarted","Data":"5dec1101ecfed5306fb6b0c67ef748dd6d81429ad00cdfaed4c5098ea4dc7592"}
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.852228 4881 generic.go:334] "Generic (PLEG): container finished" podID="755c1a62-7945-4edf-9e40-90d8abfea7bd" containerID="5a416b08953da58bcdbb0e85950b94c70408c486b8cd5d91092d40acf22156ef" exitCode=0
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.852320 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.855420 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-shvrh" event={"ID":"755c1a62-7945-4edf-9e40-90d8abfea7bd","Type":"ContainerDied","Data":"5a416b08953da58bcdbb0e85950b94c70408c486b8cd5d91092d40acf22156ef"}
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.855513 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.855757 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.875750 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-dgdhn" podStartSLOduration=6.874670095 podStartE2EDuration="6.874670095s" podCreationTimestamp="2025-12-11 08:41:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:41:12.849958986 +0000 UTC m=+1521.227327683" watchObservedRunningTime="2025-12-11 08:41:12.874670095 +0000 UTC m=+1521.252038792"
Dec 11 08:41:12 crc kubenswrapper[4881]: I1211 08:41:12.890023 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-4h7jd" podStartSLOduration=8.890001813 podStartE2EDuration="8.890001813s" podCreationTimestamp="2025-12-11 08:41:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:41:12.871706507 +0000 UTC m=+1521.249075194" watchObservedRunningTime="2025-12-11 08:41:12.890001813 +0000 UTC m=+1521.267370510"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.245763 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"]
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.266048 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-0"]
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.287667 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.311410 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"]
Dec 11 08:41:13 crc kubenswrapper[4881]: E1211 08:41:13.311972 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c22dd13-8a15-412b-a351-851d91a763ba" containerName="ceilometer-central-agent"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.311986 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c22dd13-8a15-412b-a351-851d91a763ba" containerName="ceilometer-central-agent"
Dec 11 08:41:13 crc kubenswrapper[4881]: E1211 08:41:13.311997 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c22dd13-8a15-412b-a351-851d91a763ba" containerName="proxy-httpd"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.312003 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c22dd13-8a15-412b-a351-851d91a763ba" containerName="proxy-httpd"
Dec 11 08:41:13 crc kubenswrapper[4881]: E1211 08:41:13.312026 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dca57af-8220-449b-a5fc-8001bcc8c180" containerName="mysqld-exporter"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.312031 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dca57af-8220-449b-a5fc-8001bcc8c180" containerName="mysqld-exporter"
Dec 11 08:41:13 crc kubenswrapper[4881]: E1211 08:41:13.312043 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c22dd13-8a15-412b-a351-851d91a763ba" containerName="ceilometer-notification-agent"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.312049 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c22dd13-8a15-412b-a351-851d91a763ba" containerName="ceilometer-notification-agent"
Dec 11 08:41:13 crc kubenswrapper[4881]: E1211 08:41:13.312067 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="558f097f-277d-4824-bafc-28c4c0f139f3" containerName="kube-state-metrics"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.312073 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="558f097f-277d-4824-bafc-28c4c0f139f3" containerName="kube-state-metrics"
Dec 11 08:41:13 crc kubenswrapper[4881]: E1211 08:41:13.312094 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c22dd13-8a15-412b-a351-851d91a763ba" containerName="sg-core"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.312100 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c22dd13-8a15-412b-a351-851d91a763ba" containerName="sg-core"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.312310 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="558f097f-277d-4824-bafc-28c4c0f139f3" containerName="kube-state-metrics"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.312351 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c22dd13-8a15-412b-a351-851d91a763ba" containerName="ceilometer-notification-agent"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.312367 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c22dd13-8a15-412b-a351-851d91a763ba" containerName="ceilometer-central-agent"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.312375 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c22dd13-8a15-412b-a351-851d91a763ba" containerName="proxy-httpd"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.312385 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c22dd13-8a15-412b-a351-851d91a763ba" containerName="sg-core"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.312396 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dca57af-8220-449b-a5fc-8001bcc8c180" containerName="mysqld-exporter"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.316580 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.322067 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-mysqld-exporter-svc"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.322186 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-ng9b8"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.322728 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.333507 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"]
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.355151 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/598ba082-c5c4-4dc3-b4ec-5db6677fdb61-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"598ba082-c5c4-4dc3-b4ec-5db6677fdb61\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.355591 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjvxg\" (UniqueName: \"kubernetes.io/projected/598ba082-c5c4-4dc3-b4ec-5db6677fdb61-kube-api-access-gjvxg\") pod \"mysqld-exporter-0\" (UID: \"598ba082-c5c4-4dc3-b4ec-5db6677fdb61\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.356188 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/598ba082-c5c4-4dc3-b4ec-5db6677fdb61-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"598ba082-c5c4-4dc3-b4ec-5db6677fdb61\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.358182 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/598ba082-c5c4-4dc3-b4ec-5db6677fdb61-config-data\") pod \"mysqld-exporter-0\" (UID: \"598ba082-c5c4-4dc3-b4ec-5db6677fdb61\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.368174 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.370117 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.371837 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.371900 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.386517 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"]
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.427865 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.471391 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdx8g\" (UniqueName: \"kubernetes.io/projected/dad35c24-846e-4c89-aa50-20ccea9fd132-kube-api-access-hdx8g\") pod \"kube-state-metrics-0\" (UID: \"dad35c24-846e-4c89-aa50-20ccea9fd132\") " pod="openstack/kube-state-metrics-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.471476 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/dad35c24-846e-4c89-aa50-20ccea9fd132-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"dad35c24-846e-4c89-aa50-20ccea9fd132\") " pod="openstack/kube-state-metrics-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.471590 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/dad35c24-846e-4c89-aa50-20ccea9fd132-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"dad35c24-846e-4c89-aa50-20ccea9fd132\") " pod="openstack/kube-state-metrics-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.471674 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/598ba082-c5c4-4dc3-b4ec-5db6677fdb61-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"598ba082-c5c4-4dc3-b4ec-5db6677fdb61\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.471722 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjvxg\" (UniqueName: \"kubernetes.io/projected/598ba082-c5c4-4dc3-b4ec-5db6677fdb61-kube-api-access-gjvxg\") pod \"mysqld-exporter-0\" (UID: \"598ba082-c5c4-4dc3-b4ec-5db6677fdb61\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.471760 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/598ba082-c5c4-4dc3-b4ec-5db6677fdb61-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"598ba082-c5c4-4dc3-b4ec-5db6677fdb61\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.471922 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dad35c24-846e-4c89-aa50-20ccea9fd132-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"dad35c24-846e-4c89-aa50-20ccea9fd132\") " pod="openstack/kube-state-metrics-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.471988 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/598ba082-c5c4-4dc3-b4ec-5db6677fdb61-config-data\") pod \"mysqld-exporter-0\" (UID: \"598ba082-c5c4-4dc3-b4ec-5db6677fdb61\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.477832 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.480873 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/598ba082-c5c4-4dc3-b4ec-5db6677fdb61-config-data\") pod \"mysqld-exporter-0\" (UID: \"598ba082-c5c4-4dc3-b4ec-5db6677fdb61\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.486224 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/598ba082-c5c4-4dc3-b4ec-5db6677fdb61-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"598ba082-c5c4-4dc3-b4ec-5db6677fdb61\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.486364 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/598ba082-c5c4-4dc3-b4ec-5db6677fdb61-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"598ba082-c5c4-4dc3-b4ec-5db6677fdb61\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.501079 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjvxg\" (UniqueName: \"kubernetes.io/projected/598ba082-c5c4-4dc3-b4ec-5db6677fdb61-kube-api-access-gjvxg\") pod \"mysqld-exporter-0\" (UID: \"598ba082-c5c4-4dc3-b4ec-5db6677fdb61\") " pod="openstack/mysqld-exporter-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.501159 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.521390 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.524569 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.526481 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.526779 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.527730 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.536268 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.574008 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5fv4\" (UniqueName: \"kubernetes.io/projected/2c3f1515-b9d7-49cc-a96f-65e3713c3311-kube-api-access-q5fv4\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.574058 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-scripts\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.574095 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dad35c24-846e-4c89-aa50-20ccea9fd132-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"dad35c24-846e-4c89-aa50-20ccea9fd132\") " pod="openstack/kube-state-metrics-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.574148 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-config-data\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.574196 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c3f1515-b9d7-49cc-a96f-65e3713c3311-run-httpd\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.574215 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdx8g\" (UniqueName: \"kubernetes.io/projected/dad35c24-846e-4c89-aa50-20ccea9fd132-kube-api-access-hdx8g\") pod \"kube-state-metrics-0\" (UID: \"dad35c24-846e-4c89-aa50-20ccea9fd132\") " pod="openstack/kube-state-metrics-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.574244 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/dad35c24-846e-4c89-aa50-20ccea9fd132-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"dad35c24-846e-4c89-aa50-20ccea9fd132\") " pod="openstack/kube-state-metrics-0"
Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.574304 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName:
\"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.574848 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/dad35c24-846e-4c89-aa50-20ccea9fd132-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"dad35c24-846e-4c89-aa50-20ccea9fd132\") " pod="openstack/kube-state-metrics-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.574951 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.574987 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c3f1515-b9d7-49cc-a96f-65e3713c3311-log-httpd\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.575016 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.580880 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/dad35c24-846e-4c89-aa50-20ccea9fd132-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"dad35c24-846e-4c89-aa50-20ccea9fd132\") " pod="openstack/kube-state-metrics-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.581522 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/dad35c24-846e-4c89-aa50-20ccea9fd132-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"dad35c24-846e-4c89-aa50-20ccea9fd132\") " pod="openstack/kube-state-metrics-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.581930 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dad35c24-846e-4c89-aa50-20ccea9fd132-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"dad35c24-846e-4c89-aa50-20ccea9fd132\") " pod="openstack/kube-state-metrics-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.592431 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdx8g\" (UniqueName: \"kubernetes.io/projected/dad35c24-846e-4c89-aa50-20ccea9fd132-kube-api-access-hdx8g\") pod \"kube-state-metrics-0\" (UID: \"dad35c24-846e-4c89-aa50-20ccea9fd132\") " pod="openstack/kube-state-metrics-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.662974 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.677773 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.677823 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c3f1515-b9d7-49cc-a96f-65e3713c3311-log-httpd\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.677863 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.677937 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5fv4\" (UniqueName: \"kubernetes.io/projected/2c3f1515-b9d7-49cc-a96f-65e3713c3311-kube-api-access-q5fv4\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.677980 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-scripts\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.678475 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c3f1515-b9d7-49cc-a96f-65e3713c3311-log-httpd\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.678867 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-config-data\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.678934 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c3f1515-b9d7-49cc-a96f-65e3713c3311-run-httpd\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.679013 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.679896 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c3f1515-b9d7-49cc-a96f-65e3713c3311-run-httpd\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " 
pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.681619 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.681961 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.682772 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-scripts\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.683136 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-config-data\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.683878 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.698883 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5fv4\" (UniqueName: \"kubernetes.io/projected/2c3f1515-b9d7-49cc-a96f-65e3713c3311-kube-api-access-q5fv4\") pod \"ceilometer-0\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") " pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.719737 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.847023 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.880255 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vk695" event={"ID":"324526c9-d17c-4dfd-b2f7-b86e9577c36c","Type":"ContainerStarted","Data":"d89c12eca30a4bde2f8bc067026e755b3972417eccab9ffdce126ecbbba33e88"} Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.889608 4881 generic.go:334] "Generic (PLEG): container finished" podID="8a89d31f-1156-4509-85be-1ac98304de6c" containerID="5dec1101ecfed5306fb6b0c67ef748dd6d81429ad00cdfaed4c5098ea4dc7592" exitCode=0 Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.890507 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9" event={"ID":"8a89d31f-1156-4509-85be-1ac98304de6c","Type":"ContainerDied","Data":"5dec1101ecfed5306fb6b0c67ef748dd6d81429ad00cdfaed4c5098ea4dc7592"} Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.890584 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9" event={"ID":"8a89d31f-1156-4509-85be-1ac98304de6c","Type":"ContainerStarted","Data":"1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3"} Dec 11 08:41:13 crc kubenswrapper[4881]: I1211 08:41:13.921975 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vk695" podStartSLOduration=12.865688069 podStartE2EDuration="31.921952753s" podCreationTimestamp="2025-12-11 08:40:42 +0000 UTC" firstStartedPulling="2025-12-11 08:40:53.45755332 +0000 UTC m=+1501.834922017" lastFinishedPulling="2025-12-11 08:41:12.513818004 +0000 UTC m=+1520.891186701" observedRunningTime="2025-12-11 08:41:13.899130642 +0000 UTC m=+1522.276499339" watchObservedRunningTime="2025-12-11 08:41:13.921952753 +0000 UTC m=+1522.299321440" Dec 11 08:41:14 crc kubenswrapper[4881]: I1211 08:41:14.391387 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 11 08:41:14 crc kubenswrapper[4881]: I1211 08:41:14.412157 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 11 08:41:14 crc kubenswrapper[4881]: I1211 08:41:14.584267 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:41:14 crc kubenswrapper[4881]: I1211 08:41:14.898942 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9" Dec 11 08:41:14 crc kubenswrapper[4881]: I1211 08:41:14.922122 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9" podStartSLOduration=9.922108364 podStartE2EDuration="9.922108364s" podCreationTimestamp="2025-12-11 08:41:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:41:14.91897785 +0000 UTC m=+1523.296346547" watchObservedRunningTime="2025-12-11 08:41:14.922108364 +0000 UTC m=+1523.299477061" Dec 11 08:41:15 crc kubenswrapper[4881]: I1211 08:41:15.024298 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3dca57af-8220-449b-a5fc-8001bcc8c180" path="/var/lib/kubelet/pods/3dca57af-8220-449b-a5fc-8001bcc8c180/volumes" Dec 11 08:41:15 crc kubenswrapper[4881]: I1211 08:41:15.386208 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="558f097f-277d-4824-bafc-28c4c0f139f3" 
path="/var/lib/kubelet/pods/558f097f-277d-4824-bafc-28c4c0f139f3/volumes" Dec 11 08:41:15 crc kubenswrapper[4881]: I1211 08:41:15.387349 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c22dd13-8a15-412b-a351-851d91a763ba" path="/var/lib/kubelet/pods/7c22dd13-8a15-412b-a351-851d91a763ba/volumes" Dec 11 08:41:16 crc kubenswrapper[4881]: E1211 08:41:16.652487 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-c90ce7cd8fcf126d46d59245f06190daa1830091da52fd5b1af057889d7e2442.scope\": RecentStats: unable to find data in memory cache]" Dec 11 08:41:17 crc kubenswrapper[4881]: E1211 08:41:17.301498 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-c90ce7cd8fcf126d46d59245f06190daa1830091da52fd5b1af057889d7e2442.scope\": RecentStats: unable to find data in memory cache]" Dec 11 08:41:18 crc kubenswrapper[4881]: W1211 08:41:18.721360 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c3f1515_b9d7_49cc_a96f_65e3713c3311.slice/crio-5ccd150953f734600d7c57127107a992c0c6d1660adf601d1eaea84519c3ac59 WatchSource:0}: Error finding container 5ccd150953f734600d7c57127107a992c0c6d1660adf601d1eaea84519c3ac59: Status 404 returned error can't find the container with id 5ccd150953f734600d7c57127107a992c0c6d1660adf601d1eaea84519c3ac59 Dec 11 08:41:18 crc kubenswrapper[4881]: W1211 08:41:18.726119 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddad35c24_846e_4c89_aa50_20ccea9fd132.slice/crio-4e7966678d2ae08101d87a6c68060fc71e1b66868750ea4fe0cc666388165ab7 WatchSource:0}: Error finding container 4e7966678d2ae08101d87a6c68060fc71e1b66868750ea4fe0cc666388165ab7: Status 404 returned error can't find the container with id 4e7966678d2ae08101d87a6c68060fc71e1b66868750ea4fe0cc666388165ab7 Dec 11 08:41:18 crc kubenswrapper[4881]: I1211 08:41:18.954378 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"dad35c24-846e-4c89-aa50-20ccea9fd132","Type":"ContainerStarted","Data":"4e7966678d2ae08101d87a6c68060fc71e1b66868750ea4fe0cc666388165ab7"} Dec 11 08:41:18 crc kubenswrapper[4881]: I1211 08:41:18.955544 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c3f1515-b9d7-49cc-a96f-65e3713c3311","Type":"ContainerStarted","Data":"5ccd150953f734600d7c57127107a992c0c6d1660adf601d1eaea84519c3ac59"} Dec 11 08:41:18 crc kubenswrapper[4881]: I1211 08:41:18.957041 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"598ba082-c5c4-4dc3-b4ec-5db6677fdb61","Type":"ContainerStarted","Data":"5ba465c514925a24332ef2e4b2225131de5ba964dd57bbe117d1ea989be97df4"} Dec 11 08:41:20 crc kubenswrapper[4881]: I1211 08:41:20.881558 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9" Dec 11 08:41:20 crc kubenswrapper[4881]: I1211 08:41:20.985248 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-4r7pn"] Dec 11 08:41:20 crc kubenswrapper[4881]: I1211 08:41:20.986467 4881 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" podUID="8ae1990b-5d22-4cb7-ace7-92acddc6df35" containerName="dnsmasq-dns" containerID="cri-o://3e1e885c89156a7d385bcec17fd37c76098b23f9a0ffd2288225cae75a0514cd" gracePeriod=10 Dec 11 08:41:22 crc kubenswrapper[4881]: I1211 08:41:22.010455 4881 generic.go:334] "Generic (PLEG): container finished" podID="8ae1990b-5d22-4cb7-ace7-92acddc6df35" containerID="3e1e885c89156a7d385bcec17fd37c76098b23f9a0ffd2288225cae75a0514cd" exitCode=0 Dec 11 08:41:22 crc kubenswrapper[4881]: I1211 08:41:22.010528 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" event={"ID":"8ae1990b-5d22-4cb7-ace7-92acddc6df35","Type":"ContainerDied","Data":"3e1e885c89156a7d385bcec17fd37c76098b23f9a0ffd2288225cae75a0514cd"} Dec 11 08:41:22 crc kubenswrapper[4881]: I1211 08:41:22.451004 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vk695" Dec 11 08:41:22 crc kubenswrapper[4881]: I1211 08:41:22.451303 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vk695" Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.044102 4881 generic.go:334] "Generic (PLEG): container finished" podID="b6d5fc48-a707-4ab1-a8f9-392295486185" containerID="59913bd1ac6e438f295e06b92ae8b703b58c3f2270de6ae1db0d0b8aada64c33" exitCode=0 Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.044204 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-4h7jd" event={"ID":"b6d5fc48-a707-4ab1-a8f9-392295486185","Type":"ContainerDied","Data":"59913bd1ac6e438f295e06b92ae8b703b58c3f2270de6ae1db0d0b8aada64c33"} Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.467749 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.508832 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vk695" podUID="324526c9-d17c-4dfd-b2f7-b86e9577c36c" containerName="registry-server" probeResult="failure" output=< Dec 11 08:41:23 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 08:41:23 crc kubenswrapper[4881]: > Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.579499 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-dns-svc\") pod \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.579560 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-dns-swift-storage-0\") pod \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.579680 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-ovsdbserver-nb\") pod \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.579861 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-config\") pod \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.579930 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8dkwh\" (UniqueName: \"kubernetes.io/projected/8ae1990b-5d22-4cb7-ace7-92acddc6df35-kube-api-access-8dkwh\") pod \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.579995 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-ovsdbserver-sb\") pod \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\" (UID: \"8ae1990b-5d22-4cb7-ace7-92acddc6df35\") " Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.586532 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ae1990b-5d22-4cb7-ace7-92acddc6df35-kube-api-access-8dkwh" (OuterVolumeSpecName: "kube-api-access-8dkwh") pod "8ae1990b-5d22-4cb7-ace7-92acddc6df35" (UID: "8ae1990b-5d22-4cb7-ace7-92acddc6df35"). InnerVolumeSpecName "kube-api-access-8dkwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.685802 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8dkwh\" (UniqueName: \"kubernetes.io/projected/8ae1990b-5d22-4cb7-ace7-92acddc6df35-kube-api-access-8dkwh\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.780919 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8ae1990b-5d22-4cb7-ace7-92acddc6df35" (UID: "8ae1990b-5d22-4cb7-ace7-92acddc6df35"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.788635 4881 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.802433 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8ae1990b-5d22-4cb7-ace7-92acddc6df35" (UID: "8ae1990b-5d22-4cb7-ace7-92acddc6df35"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.884062 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-config" (OuterVolumeSpecName: "config") pod "8ae1990b-5d22-4cb7-ace7-92acddc6df35" (UID: "8ae1990b-5d22-4cb7-ace7-92acddc6df35"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.891033 4881 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.891066 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.895162 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8ae1990b-5d22-4cb7-ace7-92acddc6df35" (UID: "8ae1990b-5d22-4cb7-ace7-92acddc6df35"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:41:23 crc kubenswrapper[4881]: I1211 08:41:23.899682 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8ae1990b-5d22-4cb7-ace7-92acddc6df35" (UID: "8ae1990b-5d22-4cb7-ace7-92acddc6df35"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.001269 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.001316 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8ae1990b-5d22-4cb7-ace7-92acddc6df35-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.061908 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"532f11a0-2d04-48b6-87a8-b27e99195ac9","Type":"ContainerStarted","Data":"eca5d6f7bfbb9f1cc81b72c1afde398cc730cee98865ca62bceac737883a8dd4"} Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.062074 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="532f11a0-2d04-48b6-87a8-b27e99195ac9" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://eca5d6f7bfbb9f1cc81b72c1afde398cc730cee98865ca62bceac737883a8dd4" gracePeriod=30 Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.065392 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-shvrh" event={"ID":"755c1a62-7945-4edf-9e40-90d8abfea7bd","Type":"ContainerStarted","Data":"03c9954e0b751b9e83f89e61d1cca603165f6ae7d28ab9fae4631707e363808c"} Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.072321 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"10d7837c-f2c1-475b-a8b2-0885aee68f82","Type":"ContainerStarted","Data":"a18e96d280ea2f3e6bfc69105df815a2f3302869e32ac2d684f518a54fb2a029"} Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.086989 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" event={"ID":"8ae1990b-5d22-4cb7-ace7-92acddc6df35","Type":"ContainerDied","Data":"f3ede814408af67f714fa7560345425599d168f707853b292da87b048fb4dd10"} Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.087047 4881 scope.go:117] "RemoveContainer" containerID="3e1e885c89156a7d385bcec17fd37c76098b23f9a0ffd2288225cae75a0514cd" Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.087209 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-4r7pn" Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.101069 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"598ba082-c5c4-4dc3-b4ec-5db6677fdb61","Type":"ContainerStarted","Data":"d63dcc074dd4efb9176b6898faa77de724909da18ffd8fc29573cf79840ee60e"} Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.101267 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=7.445962967 podStartE2EDuration="19.10125092s" podCreationTimestamp="2025-12-11 08:41:05 +0000 UTC" firstStartedPulling="2025-12-11 08:41:11.63938111 +0000 UTC m=+1520.016749807" lastFinishedPulling="2025-12-11 08:41:23.294669063 +0000 UTC m=+1531.672037760" observedRunningTime="2025-12-11 08:41:24.083105758 +0000 UTC m=+1532.460474455" watchObservedRunningTime="2025-12-11 08:41:24.10125092 +0000 UTC m=+1532.478619627" Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.124578 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c3f1515-b9d7-49cc-a96f-65e3713c3311","Type":"ContainerStarted","Data":"52b2419c29189f05c8ec1d616c9af2d71612b70012997c4e04970145d2b46d7d"} Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.145552 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-4r7pn"] Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.146187 4881 scope.go:117] "RemoveContainer" containerID="4840d348945bd8796ccac6a5b0d54d1e4143b7cea5f2a1f61918a38dfecec705" Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.170844 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-4r7pn"] Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.178966 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=9.625033097 podStartE2EDuration="11.178946262s" podCreationTimestamp="2025-12-11 08:41:13 +0000 UTC" firstStartedPulling="2025-12-11 08:41:21.811381258 +0000 UTC m=+1530.188749945" lastFinishedPulling="2025-12-11 08:41:23.365294413 +0000 UTC m=+1531.742663110" observedRunningTime="2025-12-11 08:41:24.140858314 +0000 UTC m=+1532.518227041" watchObservedRunningTime="2025-12-11 08:41:24.178946262 +0000 UTC m=+1532.556314959" Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.554071 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-4h7jd" Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.618856 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-scripts\") pod \"b6d5fc48-a707-4ab1-a8f9-392295486185\" (UID: \"b6d5fc48-a707-4ab1-a8f9-392295486185\") " Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.618994 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-combined-ca-bundle\") pod \"b6d5fc48-a707-4ab1-a8f9-392295486185\" (UID: \"b6d5fc48-a707-4ab1-a8f9-392295486185\") " Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.619059 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bfq26\" (UniqueName: \"kubernetes.io/projected/b6d5fc48-a707-4ab1-a8f9-392295486185-kube-api-access-bfq26\") pod \"b6d5fc48-a707-4ab1-a8f9-392295486185\" (UID: \"b6d5fc48-a707-4ab1-a8f9-392295486185\") " Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.620271 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-config-data\") pod \"b6d5fc48-a707-4ab1-a8f9-392295486185\" (UID: \"b6d5fc48-a707-4ab1-a8f9-392295486185\") " Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.624736 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-scripts" (OuterVolumeSpecName: "scripts") pod "b6d5fc48-a707-4ab1-a8f9-392295486185" (UID: "b6d5fc48-a707-4ab1-a8f9-392295486185"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.625180 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6d5fc48-a707-4ab1-a8f9-392295486185-kube-api-access-bfq26" (OuterVolumeSpecName: "kube-api-access-bfq26") pod "b6d5fc48-a707-4ab1-a8f9-392295486185" (UID: "b6d5fc48-a707-4ab1-a8f9-392295486185"). InnerVolumeSpecName "kube-api-access-bfq26". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.649825 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-config-data" (OuterVolumeSpecName: "config-data") pod "b6d5fc48-a707-4ab1-a8f9-392295486185" (UID: "b6d5fc48-a707-4ab1-a8f9-392295486185"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.657721 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6d5fc48-a707-4ab1-a8f9-392295486185" (UID: "b6d5fc48-a707-4ab1-a8f9-392295486185"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.723939 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.723976 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.723985 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6d5fc48-a707-4ab1-a8f9-392295486185-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:24 crc kubenswrapper[4881]: I1211 08:41:24.723996 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bfq26\" (UniqueName: \"kubernetes.io/projected/b6d5fc48-a707-4ab1-a8f9-392295486185-kube-api-access-bfq26\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.020851 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ae1990b-5d22-4cb7-ace7-92acddc6df35" path="/var/lib/kubelet/pods/8ae1990b-5d22-4cb7-ace7-92acddc6df35/volumes" Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.142995 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"dad35c24-846e-4c89-aa50-20ccea9fd132","Type":"ContainerStarted","Data":"fe7cda9b0c6358244637fcfc349b1a2cc3eaba9f41b39ed3b0713f3caf000d3d"} Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.143134 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.147401 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-4h7jd" event={"ID":"b6d5fc48-a707-4ab1-a8f9-392295486185","Type":"ContainerDied","Data":"d552b0fb88de7338f5190d2e8fe118117332612147a31819ae23c464e70d6035"} Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.147444 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d552b0fb88de7338f5190d2e8fe118117332612147a31819ae23c464e70d6035" Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.147514 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-4h7jd" Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.153593 4881 generic.go:334] "Generic (PLEG): container finished" podID="755c1a62-7945-4edf-9e40-90d8abfea7bd" containerID="03c9954e0b751b9e83f89e61d1cca603165f6ae7d28ab9fae4631707e363808c" exitCode=0 Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.153657 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-shvrh" event={"ID":"755c1a62-7945-4edf-9e40-90d8abfea7bd","Type":"ContainerDied","Data":"03c9954e0b751b9e83f89e61d1cca603165f6ae7d28ab9fae4631707e363808c"} Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.168773 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=10.546286069 podStartE2EDuration="12.168754623s" podCreationTimestamp="2025-12-11 08:41:13 +0000 UTC" firstStartedPulling="2025-12-11 08:41:21.811277275 +0000 UTC m=+1530.188645972" lastFinishedPulling="2025-12-11 08:41:23.433745809 +0000 UTC m=+1531.811114526" observedRunningTime="2025-12-11 08:41:25.16159606 +0000 UTC m=+1533.538964747" watchObservedRunningTime="2025-12-11 08:41:25.168754623 +0000 UTC m=+1533.546123340" Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.171627 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"10d7837c-f2c1-475b-a8b2-0885aee68f82","Type":"ContainerStarted","Data":"6631df914318af95e3a39c1f5c7f78e4a309884b46af0e194fce6730ac932ba3"} Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.184078 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"494c324b-b9ff-46d5-aafd-bcf1d3f72dea","Type":"ContainerStarted","Data":"d01e62e48f86201056e5b28c05d098fc51301f3c281251be1c7d7de1e4c7cdd6"} Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.188328 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e688f7d9-d215-48aa-8092-e176d3437f09","Type":"ContainerStarted","Data":"e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee"} Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.204043 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=8.5499739 podStartE2EDuration="20.204028727s" podCreationTimestamp="2025-12-11 08:41:05 +0000 UTC" firstStartedPulling="2025-12-11 08:41:11.639827953 +0000 UTC m=+1520.017196650" lastFinishedPulling="2025-12-11 08:41:23.29388278 +0000 UTC m=+1531.671251477" observedRunningTime="2025-12-11 08:41:25.203272224 +0000 UTC m=+1533.580640921" watchObservedRunningTime="2025-12-11 08:41:25.204028727 +0000 UTC m=+1533.581397414" Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.237750 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=8.442243662 podStartE2EDuration="20.237734654s" podCreationTimestamp="2025-12-11 08:41:05 +0000 UTC" firstStartedPulling="2025-12-11 08:41:11.49881507 +0000 UTC m=+1519.876183767" lastFinishedPulling="2025-12-11 08:41:23.294306062 +0000 UTC m=+1531.671674759" observedRunningTime="2025-12-11 08:41:25.2312445 +0000 UTC m=+1533.608613197" watchObservedRunningTime="2025-12-11 08:41:25.237734654 +0000 UTC m=+1533.615103351" Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.384836 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 08:41:25 
crc kubenswrapper[4881]: I1211 08:41:25.412757 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.737290 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.737370 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.786644 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.786713 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.838845 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 11 08:41:25 crc kubenswrapper[4881]: I1211 08:41:25.951272 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:41:26 crc kubenswrapper[4881]: I1211 08:41:26.220103 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"494c324b-b9ff-46d5-aafd-bcf1d3f72dea","Type":"ContainerStarted","Data":"1c0b7f76ecb7ef9f68ee6fa630ef01d8245a1bae5c7314a1f03f7d71cb50e613"} Dec 11 08:41:26 crc kubenswrapper[4881]: I1211 08:41:26.220545 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="494c324b-b9ff-46d5-aafd-bcf1d3f72dea" containerName="nova-metadata-log" containerID="cri-o://d01e62e48f86201056e5b28c05d098fc51301f3c281251be1c7d7de1e4c7cdd6" gracePeriod=30 Dec 11 08:41:26 crc kubenswrapper[4881]: I1211 08:41:26.220675 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="494c324b-b9ff-46d5-aafd-bcf1d3f72dea" containerName="nova-metadata-metadata" containerID="cri-o://1c0b7f76ecb7ef9f68ee6fa630ef01d8245a1bae5c7314a1f03f7d71cb50e613" gracePeriod=30 Dec 11 08:41:26 crc kubenswrapper[4881]: I1211 08:41:26.237309 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-tt4br" event={"ID":"67c4b27a-5352-4006-9944-6de8dc05d3d1","Type":"ContainerStarted","Data":"f75e8a784092df0565ac791cab535d3d1d098203dcd0d357752cff3750eabb5d"} Dec 11 08:41:26 crc kubenswrapper[4881]: I1211 08:41:26.252096 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=9.592919169 podStartE2EDuration="21.252076128s" podCreationTimestamp="2025-12-11 08:41:05 +0000 UTC" firstStartedPulling="2025-12-11 08:41:11.70599835 +0000 UTC m=+1520.083367037" lastFinishedPulling="2025-12-11 08:41:23.365155299 +0000 UTC m=+1531.742523996" observedRunningTime="2025-12-11 08:41:26.244280365 +0000 UTC m=+1534.621649072" watchObservedRunningTime="2025-12-11 08:41:26.252076128 +0000 UTC m=+1534.629444825" Dec 11 08:41:26 crc kubenswrapper[4881]: I1211 08:41:26.260439 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c3f1515-b9d7-49cc-a96f-65e3713c3311","Type":"ContainerStarted","Data":"649bf814d8589ce30cf26c1a32e3bd9df9a66a3076ea58aa465831dfa51b1cc2"} Dec 11 08:41:26 crc kubenswrapper[4881]: I1211 08:41:26.274028 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-tt4br" 
podStartSLOduration=1.8635827759999999 podStartE2EDuration="43.274004823s" podCreationTimestamp="2025-12-11 08:40:43 +0000 UTC" firstStartedPulling="2025-12-11 08:40:43.943885409 +0000 UTC m=+1492.321254106" lastFinishedPulling="2025-12-11 08:41:25.354307456 +0000 UTC m=+1533.731676153" observedRunningTime="2025-12-11 08:41:26.261003615 +0000 UTC m=+1534.638372312" watchObservedRunningTime="2025-12-11 08:41:26.274004823 +0000 UTC m=+1534.651373520" Dec 11 08:41:26 crc kubenswrapper[4881]: I1211 08:41:26.304259 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-shvrh" podStartSLOduration=15.264919355 podStartE2EDuration="28.304236627s" podCreationTimestamp="2025-12-11 08:40:58 +0000 UTC" firstStartedPulling="2025-12-11 08:41:12.862040248 +0000 UTC m=+1521.239408945" lastFinishedPulling="2025-12-11 08:41:25.90135752 +0000 UTC m=+1534.278726217" observedRunningTime="2025-12-11 08:41:26.298494176 +0000 UTC m=+1534.675862883" watchObservedRunningTime="2025-12-11 08:41:26.304236627 +0000 UTC m=+1534.681605314" Dec 11 08:41:26 crc kubenswrapper[4881]: I1211 08:41:26.319159 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 11 08:41:26 crc kubenswrapper[4881]: I1211 08:41:26.829923 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="10d7837c-f2c1-475b-a8b2-0885aee68f82" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.236:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 11 08:41:26 crc kubenswrapper[4881]: I1211 08:41:26.830431 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="10d7837c-f2c1-475b-a8b2-0885aee68f82" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.236:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 11 08:41:27 crc kubenswrapper[4881]: E1211 08:41:27.041812 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-c90ce7cd8fcf126d46d59245f06190daa1830091da52fd5b1af057889d7e2442.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod494c324b_b9ff_46d5_aafd_bcf1d3f72dea.slice/crio-conmon-1c0b7f76ecb7ef9f68ee6fa630ef01d8245a1bae5c7314a1f03f7d71cb50e613.scope\": RecentStats: unable to find data in memory cache]" Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.281675 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-shvrh" event={"ID":"755c1a62-7945-4edf-9e40-90d8abfea7bd","Type":"ContainerStarted","Data":"6c39932f97886e8da00bf37a3a85bc8841a49a2271d73d3c3f6d6fc85564150b"} Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.285201 4881 generic.go:334] "Generic (PLEG): container finished" podID="494c324b-b9ff-46d5-aafd-bcf1d3f72dea" containerID="1c0b7f76ecb7ef9f68ee6fa630ef01d8245a1bae5c7314a1f03f7d71cb50e613" exitCode=0 Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.285234 4881 generic.go:334] "Generic (PLEG): container finished" podID="494c324b-b9ff-46d5-aafd-bcf1d3f72dea" containerID="d01e62e48f86201056e5b28c05d098fc51301f3c281251be1c7d7de1e4c7cdd6" exitCode=143 Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.285290 4881 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"494c324b-b9ff-46d5-aafd-bcf1d3f72dea","Type":"ContainerDied","Data":"1c0b7f76ecb7ef9f68ee6fa630ef01d8245a1bae5c7314a1f03f7d71cb50e613"}
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.285373 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"494c324b-b9ff-46d5-aafd-bcf1d3f72dea","Type":"ContainerDied","Data":"d01e62e48f86201056e5b28c05d098fc51301f3c281251be1c7d7de1e4c7cdd6"}
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.285386 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"494c324b-b9ff-46d5-aafd-bcf1d3f72dea","Type":"ContainerDied","Data":"c60e1b0f97154c39c1ecb6d188c232ac10f12f11cc7b64b57981eb4f5b3a1c50"}
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.285398 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c60e1b0f97154c39c1ecb6d188c232ac10f12f11cc7b64b57981eb4f5b3a1c50"
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.288520 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c3f1515-b9d7-49cc-a96f-65e3713c3311","Type":"ContainerStarted","Data":"12960751b4f82cf137ad111c7ced056d1e795a77f7be2976a44523a9c048a25c"}
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.288673 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="e688f7d9-d215-48aa-8092-e176d3437f09" containerName="nova-scheduler-scheduler" containerID="cri-o://e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" gracePeriod=30
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.289593 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="10d7837c-f2c1-475b-a8b2-0885aee68f82" containerName="nova-api-log" containerID="cri-o://a18e96d280ea2f3e6bfc69105df815a2f3302869e32ac2d684f518a54fb2a029" gracePeriod=30
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.289686 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="10d7837c-f2c1-475b-a8b2-0885aee68f82" containerName="nova-api-api" containerID="cri-o://6631df914318af95e3a39c1f5c7f78e4a309884b46af0e194fce6730ac932ba3" gracePeriod=30
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.373681 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.415486 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgrtx\" (UniqueName: \"kubernetes.io/projected/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-kube-api-access-zgrtx\") pod \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\" (UID: \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\") "
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.416012 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-combined-ca-bundle\") pod \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\" (UID: \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\") "
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.416065 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-config-data\") pod \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\" (UID: \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\") "
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.416493 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-logs\") pod \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\" (UID: \"494c324b-b9ff-46d5-aafd-bcf1d3f72dea\") "
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.419366 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-logs" (OuterVolumeSpecName: "logs") pod "494c324b-b9ff-46d5-aafd-bcf1d3f72dea" (UID: "494c324b-b9ff-46d5-aafd-bcf1d3f72dea"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.426700 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-kube-api-access-zgrtx" (OuterVolumeSpecName: "kube-api-access-zgrtx") pod "494c324b-b9ff-46d5-aafd-bcf1d3f72dea" (UID: "494c324b-b9ff-46d5-aafd-bcf1d3f72dea"). InnerVolumeSpecName "kube-api-access-zgrtx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.452544 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-config-data" (OuterVolumeSpecName: "config-data") pod "494c324b-b9ff-46d5-aafd-bcf1d3f72dea" (UID: "494c324b-b9ff-46d5-aafd-bcf1d3f72dea"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.471487 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "494c324b-b9ff-46d5-aafd-bcf1d3f72dea" (UID: "494c324b-b9ff-46d5-aafd-bcf1d3f72dea"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.520109 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.520141 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-config-data\") on node \"crc\" DevicePath \"\""
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.520152 4881 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-logs\") on node \"crc\" DevicePath \"\""
Dec 11 08:41:27 crc kubenswrapper[4881]: I1211 08:41:27.520161 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgrtx\" (UniqueName: \"kubernetes.io/projected/494c324b-b9ff-46d5-aafd-bcf1d3f72dea-kube-api-access-zgrtx\") on node \"crc\" DevicePath \"\""
Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.301404 4881 generic.go:334] "Generic (PLEG): container finished" podID="10d7837c-f2c1-475b-a8b2-0885aee68f82" containerID="a18e96d280ea2f3e6bfc69105df815a2f3302869e32ac2d684f518a54fb2a029" exitCode=143
Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.301487 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.301487 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"10d7837c-f2c1-475b-a8b2-0885aee68f82","Type":"ContainerDied","Data":"a18e96d280ea2f3e6bfc69105df815a2f3302869e32ac2d684f518a54fb2a029"}
Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.340642 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.359495 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.373970 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Dec 11 08:41:28 crc kubenswrapper[4881]: E1211 08:41:28.374581 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ae1990b-5d22-4cb7-ace7-92acddc6df35" containerName="dnsmasq-dns"
Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.374607 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ae1990b-5d22-4cb7-ace7-92acddc6df35" containerName="dnsmasq-dns"
Dec 11 08:41:28 crc kubenswrapper[4881]: E1211 08:41:28.374624 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ae1990b-5d22-4cb7-ace7-92acddc6df35" containerName="init"
Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.374631 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ae1990b-5d22-4cb7-ace7-92acddc6df35" containerName="init"
Dec 11 08:41:28 crc kubenswrapper[4881]: E1211 08:41:28.374655 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="494c324b-b9ff-46d5-aafd-bcf1d3f72dea" containerName="nova-metadata-log"
Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.374664 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="494c324b-b9ff-46d5-aafd-bcf1d3f72dea" containerName="nova-metadata-log"
Dec 11 08:41:28 crc kubenswrapper[4881]: E1211 08:41:28.374696 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="494c324b-b9ff-46d5-aafd-bcf1d3f72dea" containerName="nova-metadata-metadata"
"RemoveStaleState: removing container" podUID="494c324b-b9ff-46d5-aafd-bcf1d3f72dea" containerName="nova-metadata-metadata" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.374701 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="494c324b-b9ff-46d5-aafd-bcf1d3f72dea" containerName="nova-metadata-metadata" Dec 11 08:41:28 crc kubenswrapper[4881]: E1211 08:41:28.374727 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6d5fc48-a707-4ab1-a8f9-392295486185" containerName="nova-manage" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.374733 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6d5fc48-a707-4ab1-a8f9-392295486185" containerName="nova-manage" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.374969 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="494c324b-b9ff-46d5-aafd-bcf1d3f72dea" containerName="nova-metadata-metadata" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.374988 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="494c324b-b9ff-46d5-aafd-bcf1d3f72dea" containerName="nova-metadata-log" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.374999 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6d5fc48-a707-4ab1-a8f9-392295486185" containerName="nova-manage" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.375015 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ae1990b-5d22-4cb7-ace7-92acddc6df35" containerName="dnsmasq-dns" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.376302 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.378528 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.378846 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.387163 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.441951 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-config-data\") pod \"nova-metadata-0\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " pod="openstack/nova-metadata-0" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.441995 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " pod="openstack/nova-metadata-0" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.442095 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-logs\") pod \"nova-metadata-0\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " pod="openstack/nova-metadata-0" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.442194 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " pod="openstack/nova-metadata-0" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.442283 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2gzs\" (UniqueName: \"kubernetes.io/projected/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-kube-api-access-c2gzs\") pod \"nova-metadata-0\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " pod="openstack/nova-metadata-0" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.544584 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-config-data\") pod \"nova-metadata-0\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " pod="openstack/nova-metadata-0" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.544653 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " pod="openstack/nova-metadata-0" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.544750 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-logs\") pod \"nova-metadata-0\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " pod="openstack/nova-metadata-0" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.544842 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " pod="openstack/nova-metadata-0" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.544920 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2gzs\" (UniqueName: \"kubernetes.io/projected/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-kube-api-access-c2gzs\") pod \"nova-metadata-0\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " pod="openstack/nova-metadata-0" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.545327 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-logs\") pod \"nova-metadata-0\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " pod="openstack/nova-metadata-0" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.549508 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " pod="openstack/nova-metadata-0" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.549615 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-config-data\") pod \"nova-metadata-0\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " pod="openstack/nova-metadata-0" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.549623 4881 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " pod="openstack/nova-metadata-0" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.562866 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2gzs\" (UniqueName: \"kubernetes.io/projected/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-kube-api-access-c2gzs\") pod \"nova-metadata-0\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " pod="openstack/nova-metadata-0" Dec 11 08:41:28 crc kubenswrapper[4881]: I1211 08:41:28.693677 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 11 08:41:29 crc kubenswrapper[4881]: I1211 08:41:29.027566 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="494c324b-b9ff-46d5-aafd-bcf1d3f72dea" path="/var/lib/kubelet/pods/494c324b-b9ff-46d5-aafd-bcf1d3f72dea/volumes" Dec 11 08:41:30 crc kubenswrapper[4881]: I1211 08:41:30.235770 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-shvrh" Dec 11 08:41:30 crc kubenswrapper[4881]: I1211 08:41:30.237287 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-shvrh" Dec 11 08:41:30 crc kubenswrapper[4881]: I1211 08:41:30.283996 4881 trace.go:236] Trace[1310712946]: "Calculate volume metrics of utilities for pod openshift-marketplace/certified-operators-zqwb5" (11-Dec-2025 08:41:28.182) (total time: 2101ms): Dec 11 08:41:30 crc kubenswrapper[4881]: Trace[1310712946]: [2.101569606s] [2.101569606s] END Dec 11 08:41:30 crc kubenswrapper[4881]: I1211 08:41:30.340511 4881 trace.go:236] Trace[374537235]: "Calculate volume metrics of catalog-content for pod openshift-marketplace/redhat-operators-kfk9x" (11-Dec-2025 08:41:28.191) (total time: 2148ms): Dec 11 08:41:30 crc kubenswrapper[4881]: Trace[374537235]: [2.148939031s] [2.148939031s] END Dec 11 08:41:30 crc kubenswrapper[4881]: E1211 08:41:30.788185 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:41:30 crc kubenswrapper[4881]: E1211 08:41:30.789810 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:41:30 crc kubenswrapper[4881]: E1211 08:41:30.790992 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:41:30 crc kubenswrapper[4881]: E1211 08:41:30.791025 4881 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="e688f7d9-d215-48aa-8092-e176d3437f09" containerName="nova-scheduler-scheduler" Dec 11 08:41:30 crc kubenswrapper[4881]: I1211 08:41:30.883629 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 08:41:31 crc kubenswrapper[4881]: I1211 08:41:31.339531 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-shvrh" podUID="755c1a62-7945-4edf-9e40-90d8abfea7bd" containerName="registry-server" probeResult="failure" output=< Dec 11 08:41:31 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 08:41:31 crc kubenswrapper[4881]: > Dec 11 08:41:31 crc kubenswrapper[4881]: I1211 08:41:31.389035 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844","Type":"ContainerStarted","Data":"2184c385a971e223e031a3584efedad7092a43a0579a4ad57e97c461525c2d00"} Dec 11 08:41:31 crc kubenswrapper[4881]: I1211 08:41:31.389089 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844","Type":"ContainerStarted","Data":"04037f0c77061cbc307677273fbd3020e41d9574bf0f94523ca43f536a33decb"} Dec 11 08:41:31 crc kubenswrapper[4881]: I1211 08:41:31.392382 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c3f1515-b9d7-49cc-a96f-65e3713c3311","Type":"ContainerStarted","Data":"25449211d300d61d95cde25b949606eb54f5698491343d8ec02f53aadc012933"} Dec 11 08:41:31 crc kubenswrapper[4881]: I1211 08:41:31.392627 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 11 08:41:31 crc kubenswrapper[4881]: I1211 08:41:31.425174 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=9.321689434 podStartE2EDuration="18.425151609s" podCreationTimestamp="2025-12-11 08:41:13 +0000 UTC" firstStartedPulling="2025-12-11 08:41:21.811254595 +0000 UTC m=+1530.188623292" lastFinishedPulling="2025-12-11 08:41:30.91471677 +0000 UTC m=+1539.292085467" observedRunningTime="2025-12-11 08:41:31.414407428 +0000 UTC m=+1539.791776135" watchObservedRunningTime="2025-12-11 08:41:31.425151609 +0000 UTC m=+1539.802520296" Dec 11 08:41:32 crc kubenswrapper[4881]: I1211 08:41:32.408495 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844","Type":"ContainerStarted","Data":"67eb97c9052c95582305c04ef8237c3a7a5fcbe153b6943ea5671bf72a85fa9b"} Dec 11 08:41:32 crc kubenswrapper[4881]: I1211 08:41:32.412526 4881 generic.go:334] "Generic (PLEG): container finished" podID="67c4b27a-5352-4006-9944-6de8dc05d3d1" containerID="f75e8a784092df0565ac791cab535d3d1d098203dcd0d357752cff3750eabb5d" exitCode=0 Dec 11 08:41:32 crc kubenswrapper[4881]: I1211 08:41:32.412885 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-tt4br" event={"ID":"67c4b27a-5352-4006-9944-6de8dc05d3d1","Type":"ContainerDied","Data":"f75e8a784092df0565ac791cab535d3d1d098203dcd0d357752cff3750eabb5d"} Dec 11 08:41:32 crc kubenswrapper[4881]: I1211 08:41:32.449898 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=4.449873824 podStartE2EDuration="4.449873824s" podCreationTimestamp="2025-12-11 08:41:28 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:41:32.429265818 +0000 UTC m=+1540.806634525" watchObservedRunningTime="2025-12-11 08:41:32.449873824 +0000 UTC m=+1540.827242521" Dec 11 08:41:32 crc kubenswrapper[4881]: E1211 08:41:32.639524 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-c90ce7cd8fcf126d46d59245f06190daa1830091da52fd5b1af057889d7e2442.scope\": RecentStats: unable to find data in memory cache]" Dec 11 08:41:33 crc kubenswrapper[4881]: I1211 08:41:33.587472 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vk695" podUID="324526c9-d17c-4dfd-b2f7-b86e9577c36c" containerName="registry-server" probeResult="failure" output=< Dec 11 08:41:33 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 08:41:33 crc kubenswrapper[4881]: > Dec 11 08:41:33 crc kubenswrapper[4881]: I1211 08:41:33.696701 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 11 08:41:33 crc kubenswrapper[4881]: I1211 08:41:33.696967 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 11 08:41:33 crc kubenswrapper[4881]: I1211 08:41:33.730572 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.080547 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-tt4br" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.228794 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6qgbh\" (UniqueName: \"kubernetes.io/projected/67c4b27a-5352-4006-9944-6de8dc05d3d1-kube-api-access-6qgbh\") pod \"67c4b27a-5352-4006-9944-6de8dc05d3d1\" (UID: \"67c4b27a-5352-4006-9944-6de8dc05d3d1\") " Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.228930 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-combined-ca-bundle\") pod \"67c4b27a-5352-4006-9944-6de8dc05d3d1\" (UID: \"67c4b27a-5352-4006-9944-6de8dc05d3d1\") " Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.228977 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-config-data\") pod \"67c4b27a-5352-4006-9944-6de8dc05d3d1\" (UID: \"67c4b27a-5352-4006-9944-6de8dc05d3d1\") " Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.229052 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-scripts\") pod \"67c4b27a-5352-4006-9944-6de8dc05d3d1\" (UID: \"67c4b27a-5352-4006-9944-6de8dc05d3d1\") " Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.237993 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67c4b27a-5352-4006-9944-6de8dc05d3d1-kube-api-access-6qgbh" (OuterVolumeSpecName: "kube-api-access-6qgbh") pod "67c4b27a-5352-4006-9944-6de8dc05d3d1" (UID: "67c4b27a-5352-4006-9944-6de8dc05d3d1"). 
InnerVolumeSpecName "kube-api-access-6qgbh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.254267 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-scripts" (OuterVolumeSpecName: "scripts") pod "67c4b27a-5352-4006-9944-6de8dc05d3d1" (UID: "67c4b27a-5352-4006-9944-6de8dc05d3d1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.275004 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-config-data" (OuterVolumeSpecName: "config-data") pod "67c4b27a-5352-4006-9944-6de8dc05d3d1" (UID: "67c4b27a-5352-4006-9944-6de8dc05d3d1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.279585 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "67c4b27a-5352-4006-9944-6de8dc05d3d1" (UID: "67c4b27a-5352-4006-9944-6de8dc05d3d1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.336879 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6qgbh\" (UniqueName: \"kubernetes.io/projected/67c4b27a-5352-4006-9944-6de8dc05d3d1-kube-api-access-6qgbh\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.336925 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.336937 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.336951 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67c4b27a-5352-4006-9944-6de8dc05d3d1-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.436819 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-tt4br" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.437110 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-tt4br" event={"ID":"67c4b27a-5352-4006-9944-6de8dc05d3d1","Type":"ContainerDied","Data":"393e637041ef805584aca75634d58da67bd74f25246ad16b2040a7c305d3ce35"} Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.438911 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="393e637041ef805584aca75634d58da67bd74f25246ad16b2040a7c305d3ce35" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.645676 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Dec 11 08:41:34 crc kubenswrapper[4881]: E1211 08:41:34.646491 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67c4b27a-5352-4006-9944-6de8dc05d3d1" containerName="aodh-db-sync" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.646505 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="67c4b27a-5352-4006-9944-6de8dc05d3d1" containerName="aodh-db-sync" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.646810 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="67c4b27a-5352-4006-9944-6de8dc05d3d1" containerName="aodh-db-sync" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.653155 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.656034 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-gqtm2" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.656327 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.656608 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.663160 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.747431 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-combined-ca-bundle\") pod \"aodh-0\" (UID: \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\") " pod="openstack/aodh-0" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.747488 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-scripts\") pod \"aodh-0\" (UID: \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\") " pod="openstack/aodh-0" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.747543 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-config-data\") pod \"aodh-0\" (UID: \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\") " pod="openstack/aodh-0" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.747676 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvszs\" (UniqueName: \"kubernetes.io/projected/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-kube-api-access-vvszs\") pod \"aodh-0\" (UID: \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\") " 
pod="openstack/aodh-0" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.850142 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-config-data\") pod \"aodh-0\" (UID: \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\") " pod="openstack/aodh-0" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.850318 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvszs\" (UniqueName: \"kubernetes.io/projected/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-kube-api-access-vvszs\") pod \"aodh-0\" (UID: \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\") " pod="openstack/aodh-0" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.850693 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-combined-ca-bundle\") pod \"aodh-0\" (UID: \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\") " pod="openstack/aodh-0" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.850728 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-scripts\") pod \"aodh-0\" (UID: \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\") " pod="openstack/aodh-0" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.855612 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-scripts\") pod \"aodh-0\" (UID: \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\") " pod="openstack/aodh-0" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.855991 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-combined-ca-bundle\") pod \"aodh-0\" (UID: \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\") " pod="openstack/aodh-0" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.856637 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-config-data\") pod \"aodh-0\" (UID: \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\") " pod="openstack/aodh-0" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.870956 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvszs\" (UniqueName: \"kubernetes.io/projected/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-kube-api-access-vvszs\") pod \"aodh-0\" (UID: \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\") " pod="openstack/aodh-0" Dec 11 08:41:34 crc kubenswrapper[4881]: I1211 08:41:34.969371 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Dec 11 08:41:35 crc kubenswrapper[4881]: W1211 08:41:35.495904 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9bdc0d4_7342_4b61_8acc_c2ac4c62653b.slice/crio-6d5651a7391d686acf9f7a1e064a27a444fa9404cbfcc8c73b35a82fac196ed0 WatchSource:0}: Error finding container 6d5651a7391d686acf9f7a1e064a27a444fa9404cbfcc8c73b35a82fac196ed0: Status 404 returned error can't find the container with id 6d5651a7391d686acf9f7a1e064a27a444fa9404cbfcc8c73b35a82fac196ed0 Dec 11 08:41:35 crc kubenswrapper[4881]: I1211 08:41:35.496836 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Dec 11 08:41:35 crc kubenswrapper[4881]: I1211 08:41:35.746152 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 11 08:41:35 crc kubenswrapper[4881]: I1211 08:41:35.746583 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 11 08:41:35 crc kubenswrapper[4881]: E1211 08:41:35.789437 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:41:35 crc kubenswrapper[4881]: E1211 08:41:35.790968 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:41:35 crc kubenswrapper[4881]: E1211 08:41:35.794571 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:41:35 crc kubenswrapper[4881]: E1211 08:41:35.794790 4881 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="e688f7d9-d215-48aa-8092-e176d3437f09" containerName="nova-scheduler-scheduler" Dec 11 08:41:36 crc kubenswrapper[4881]: I1211 08:41:36.481501 4881 generic.go:334] "Generic (PLEG): container finished" podID="10d7837c-f2c1-475b-a8b2-0885aee68f82" containerID="6631df914318af95e3a39c1f5c7f78e4a309884b46af0e194fce6730ac932ba3" exitCode=0 Dec 11 08:41:36 crc kubenswrapper[4881]: I1211 08:41:36.481618 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"10d7837c-f2c1-475b-a8b2-0885aee68f82","Type":"ContainerDied","Data":"6631df914318af95e3a39c1f5c7f78e4a309884b46af0e194fce6730ac932ba3"} Dec 11 08:41:36 crc kubenswrapper[4881]: I1211 08:41:36.492524 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b","Type":"ContainerStarted","Data":"6d5651a7391d686acf9f7a1e064a27a444fa9404cbfcc8c73b35a82fac196ed0"} Dec 11 08:41:36 crc kubenswrapper[4881]: I1211 08:41:36.738365 4881 util.go:48] "No ready sandbox for pod 
can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 08:41:36 crc kubenswrapper[4881]: I1211 08:41:36.800324 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10d7837c-f2c1-475b-a8b2-0885aee68f82-logs\") pod \"10d7837c-f2c1-475b-a8b2-0885aee68f82\" (UID: \"10d7837c-f2c1-475b-a8b2-0885aee68f82\") " Dec 11 08:41:36 crc kubenswrapper[4881]: I1211 08:41:36.800709 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10d7837c-f2c1-475b-a8b2-0885aee68f82-combined-ca-bundle\") pod \"10d7837c-f2c1-475b-a8b2-0885aee68f82\" (UID: \"10d7837c-f2c1-475b-a8b2-0885aee68f82\") " Dec 11 08:41:36 crc kubenswrapper[4881]: I1211 08:41:36.800741 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10d7837c-f2c1-475b-a8b2-0885aee68f82-config-data\") pod \"10d7837c-f2c1-475b-a8b2-0885aee68f82\" (UID: \"10d7837c-f2c1-475b-a8b2-0885aee68f82\") " Dec 11 08:41:36 crc kubenswrapper[4881]: I1211 08:41:36.800900 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rn94k\" (UniqueName: \"kubernetes.io/projected/10d7837c-f2c1-475b-a8b2-0885aee68f82-kube-api-access-rn94k\") pod \"10d7837c-f2c1-475b-a8b2-0885aee68f82\" (UID: \"10d7837c-f2c1-475b-a8b2-0885aee68f82\") " Dec 11 08:41:36 crc kubenswrapper[4881]: I1211 08:41:36.802597 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10d7837c-f2c1-475b-a8b2-0885aee68f82-logs" (OuterVolumeSpecName: "logs") pod "10d7837c-f2c1-475b-a8b2-0885aee68f82" (UID: "10d7837c-f2c1-475b-a8b2-0885aee68f82"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:41:36 crc kubenswrapper[4881]: I1211 08:41:36.854468 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10d7837c-f2c1-475b-a8b2-0885aee68f82-kube-api-access-rn94k" (OuterVolumeSpecName: "kube-api-access-rn94k") pod "10d7837c-f2c1-475b-a8b2-0885aee68f82" (UID: "10d7837c-f2c1-475b-a8b2-0885aee68f82"). InnerVolumeSpecName "kube-api-access-rn94k". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:41:36 crc kubenswrapper[4881]: I1211 08:41:36.877772 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10d7837c-f2c1-475b-a8b2-0885aee68f82-config-data" (OuterVolumeSpecName: "config-data") pod "10d7837c-f2c1-475b-a8b2-0885aee68f82" (UID: "10d7837c-f2c1-475b-a8b2-0885aee68f82"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:41:36 crc kubenswrapper[4881]: I1211 08:41:36.887408 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10d7837c-f2c1-475b-a8b2-0885aee68f82-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "10d7837c-f2c1-475b-a8b2-0885aee68f82" (UID: "10d7837c-f2c1-475b-a8b2-0885aee68f82"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:41:36 crc kubenswrapper[4881]: I1211 08:41:36.902925 4881 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10d7837c-f2c1-475b-a8b2-0885aee68f82-logs\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:36 crc kubenswrapper[4881]: I1211 08:41:36.902956 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10d7837c-f2c1-475b-a8b2-0885aee68f82-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:36 crc kubenswrapper[4881]: I1211 08:41:36.902974 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10d7837c-f2c1-475b-a8b2-0885aee68f82-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:36 crc kubenswrapper[4881]: I1211 08:41:36.902984 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rn94k\" (UniqueName: \"kubernetes.io/projected/10d7837c-f2c1-475b-a8b2-0885aee68f82-kube-api-access-rn94k\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:37 crc kubenswrapper[4881]: E1211 08:41:37.100322 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-c90ce7cd8fcf126d46d59245f06190daa1830091da52fd5b1af057889d7e2442.scope\": RecentStats: unable to find data in memory cache]" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.512756 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"10d7837c-f2c1-475b-a8b2-0885aee68f82","Type":"ContainerDied","Data":"98a6154b9bd4072738d3ec52504ef7ca1719f91f3f0271fbb63a2b8367cc1208"} Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.512810 4881 scope.go:117] "RemoveContainer" containerID="6631df914318af95e3a39c1f5c7f78e4a309884b46af0e194fce6730ac932ba3" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.512992 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.553181 4881 scope.go:117] "RemoveContainer" containerID="a18e96d280ea2f3e6bfc69105df815a2f3302869e32ac2d684f518a54fb2a029" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.561140 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.578408 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.594517 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 11 08:41:37 crc kubenswrapper[4881]: E1211 08:41:37.595007 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10d7837c-f2c1-475b-a8b2-0885aee68f82" containerName="nova-api-log" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.595019 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="10d7837c-f2c1-475b-a8b2-0885aee68f82" containerName="nova-api-log" Dec 11 08:41:37 crc kubenswrapper[4881]: E1211 08:41:37.595057 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10d7837c-f2c1-475b-a8b2-0885aee68f82" containerName="nova-api-api" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.595063 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="10d7837c-f2c1-475b-a8b2-0885aee68f82" containerName="nova-api-api" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.595298 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="10d7837c-f2c1-475b-a8b2-0885aee68f82" containerName="nova-api-log" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.595328 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="10d7837c-f2c1-475b-a8b2-0885aee68f82" containerName="nova-api-api" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.596755 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.599826 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.608722 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.723345 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbrtf\" (UniqueName: \"kubernetes.io/projected/88610cc6-c8c8-47f2-8425-e5714d01f7e2-kube-api-access-fbrtf\") pod \"nova-api-0\" (UID: \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\") " pod="openstack/nova-api-0" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.723509 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88610cc6-c8c8-47f2-8425-e5714d01f7e2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\") " pod="openstack/nova-api-0" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.723583 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88610cc6-c8c8-47f2-8425-e5714d01f7e2-config-data\") pod \"nova-api-0\" (UID: \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\") " pod="openstack/nova-api-0" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.723614 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88610cc6-c8c8-47f2-8425-e5714d01f7e2-logs\") pod \"nova-api-0\" (UID: \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\") " pod="openstack/nova-api-0" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.769482 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.825444 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbrtf\" (UniqueName: \"kubernetes.io/projected/88610cc6-c8c8-47f2-8425-e5714d01f7e2-kube-api-access-fbrtf\") pod \"nova-api-0\" (UID: \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\") " pod="openstack/nova-api-0" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.825618 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88610cc6-c8c8-47f2-8425-e5714d01f7e2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\") " pod="openstack/nova-api-0" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.825668 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88610cc6-c8c8-47f2-8425-e5714d01f7e2-config-data\") pod \"nova-api-0\" (UID: \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\") " pod="openstack/nova-api-0" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.825693 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88610cc6-c8c8-47f2-8425-e5714d01f7e2-logs\") pod \"nova-api-0\" (UID: \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\") " pod="openstack/nova-api-0" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.826288 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/88610cc6-c8c8-47f2-8425-e5714d01f7e2-logs\") pod \"nova-api-0\" (UID: \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\") " pod="openstack/nova-api-0" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.831839 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88610cc6-c8c8-47f2-8425-e5714d01f7e2-config-data\") pod \"nova-api-0\" (UID: \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\") " pod="openstack/nova-api-0" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.832596 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88610cc6-c8c8-47f2-8425-e5714d01f7e2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\") " pod="openstack/nova-api-0" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.842420 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbrtf\" (UniqueName: \"kubernetes.io/projected/88610cc6-c8c8-47f2-8425-e5714d01f7e2-kube-api-access-fbrtf\") pod \"nova-api-0\" (UID: \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\") " pod="openstack/nova-api-0" Dec 11 08:41:37 crc kubenswrapper[4881]: I1211 08:41:37.954960 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 08:41:38 crc kubenswrapper[4881]: I1211 08:41:38.503944 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 11 08:41:38 crc kubenswrapper[4881]: I1211 08:41:38.537164 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b","Type":"ContainerStarted","Data":"30bcc67180507330b5527285e37e71256de00d00111147702b2ccdb179c0d95e"} Dec 11 08:41:38 crc kubenswrapper[4881]: I1211 08:41:38.538605 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"88610cc6-c8c8-47f2-8425-e5714d01f7e2","Type":"ContainerStarted","Data":"70ce343a0acdb45a979305c0074ec869eff0d47c50e1bc2c512c4c67777f1adb"} Dec 11 08:41:38 crc kubenswrapper[4881]: I1211 08:41:38.695372 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 11 08:41:38 crc kubenswrapper[4881]: I1211 08:41:38.695429 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 11 08:41:38 crc kubenswrapper[4881]: I1211 08:41:38.807962 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:41:38 crc kubenswrapper[4881]: I1211 08:41:38.810292 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerName="ceilometer-central-agent" containerID="cri-o://52b2419c29189f05c8ec1d616c9af2d71612b70012997c4e04970145d2b46d7d" gracePeriod=30 Dec 11 08:41:38 crc kubenswrapper[4881]: I1211 08:41:38.810962 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerName="proxy-httpd" containerID="cri-o://25449211d300d61d95cde25b949606eb54f5698491343d8ec02f53aadc012933" gracePeriod=30 Dec 11 08:41:38 crc kubenswrapper[4881]: I1211 08:41:38.811055 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerName="sg-core" 
containerID="cri-o://12960751b4f82cf137ad111c7ced056d1e795a77f7be2976a44523a9c048a25c" gracePeriod=30 Dec 11 08:41:38 crc kubenswrapper[4881]: I1211 08:41:38.811118 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerName="ceilometer-notification-agent" containerID="cri-o://649bf814d8589ce30cf26c1a32e3bd9df9a66a3076ea58aa465831dfa51b1cc2" gracePeriod=30 Dec 11 08:41:39 crc kubenswrapper[4881]: I1211 08:41:39.036933 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10d7837c-f2c1-475b-a8b2-0885aee68f82" path="/var/lib/kubelet/pods/10d7837c-f2c1-475b-a8b2-0885aee68f82/volumes" Dec 11 08:41:39 crc kubenswrapper[4881]: I1211 08:41:39.550289 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"88610cc6-c8c8-47f2-8425-e5714d01f7e2","Type":"ContainerStarted","Data":"447f6eddf46f057e9c04d1f3c4088a6a0ab0ac83f446c22b24457f654a7e5877"} Dec 11 08:41:39 crc kubenswrapper[4881]: I1211 08:41:39.710480 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.245:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 08:41:39 crc kubenswrapper[4881]: I1211 08:41:39.710475 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.245:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 08:41:40 crc kubenswrapper[4881]: I1211 08:41:40.138563 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-xj6xk" podUID="30393d1e-8b58-4cb7-9e45-23b5b79f235e" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 11 08:41:40 crc kubenswrapper[4881]: I1211 08:41:40.313456 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-shvrh" Dec 11 08:41:40 crc kubenswrapper[4881]: I1211 08:41:40.374554 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-shvrh" Dec 11 08:41:40 crc kubenswrapper[4881]: I1211 08:41:40.561305 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-shvrh"] Dec 11 08:41:40 crc kubenswrapper[4881]: I1211 08:41:40.567912 4881 generic.go:334] "Generic (PLEG): container finished" podID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerID="25449211d300d61d95cde25b949606eb54f5698491343d8ec02f53aadc012933" exitCode=0 Dec 11 08:41:40 crc kubenswrapper[4881]: I1211 08:41:40.567963 4881 generic.go:334] "Generic (PLEG): container finished" podID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerID="12960751b4f82cf137ad111c7ced056d1e795a77f7be2976a44523a9c048a25c" exitCode=2 Dec 11 08:41:40 crc kubenswrapper[4881]: I1211 08:41:40.567972 4881 generic.go:334] "Generic (PLEG): container finished" podID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerID="52b2419c29189f05c8ec1d616c9af2d71612b70012997c4e04970145d2b46d7d" exitCode=0 Dec 11 08:41:40 crc kubenswrapper[4881]: I1211 08:41:40.567996 4881 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/ceilometer-0" event={"ID":"2c3f1515-b9d7-49cc-a96f-65e3713c3311","Type":"ContainerDied","Data":"25449211d300d61d95cde25b949606eb54f5698491343d8ec02f53aadc012933"} Dec 11 08:41:40 crc kubenswrapper[4881]: I1211 08:41:40.568072 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c3f1515-b9d7-49cc-a96f-65e3713c3311","Type":"ContainerDied","Data":"12960751b4f82cf137ad111c7ced056d1e795a77f7be2976a44523a9c048a25c"} Dec 11 08:41:40 crc kubenswrapper[4881]: I1211 08:41:40.568085 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c3f1515-b9d7-49cc-a96f-65e3713c3311","Type":"ContainerDied","Data":"52b2419c29189f05c8ec1d616c9af2d71612b70012997c4e04970145d2b46d7d"} Dec 11 08:41:40 crc kubenswrapper[4881]: I1211 08:41:40.570927 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"88610cc6-c8c8-47f2-8425-e5714d01f7e2","Type":"ContainerStarted","Data":"5547df66d0502b39203818a66c5451b5ab55b55be882862677c10c5d499b1947"} Dec 11 08:41:40 crc kubenswrapper[4881]: I1211 08:41:40.599139 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.59911536 podStartE2EDuration="3.59911536s" podCreationTimestamp="2025-12-11 08:41:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:41:40.590187174 +0000 UTC m=+1548.967555881" watchObservedRunningTime="2025-12-11 08:41:40.59911536 +0000 UTC m=+1548.976484057" Dec 11 08:41:40 crc kubenswrapper[4881]: E1211 08:41:40.788818 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:41:40 crc kubenswrapper[4881]: E1211 08:41:40.791044 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:41:40 crc kubenswrapper[4881]: E1211 08:41:40.792202 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:41:40 crc kubenswrapper[4881]: E1211 08:41:40.792271 4881 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="e688f7d9-d215-48aa-8092-e176d3437f09" containerName="nova-scheduler-scheduler" Dec 11 08:41:41 crc kubenswrapper[4881]: I1211 08:41:41.583167 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-shvrh" podUID="755c1a62-7945-4edf-9e40-90d8abfea7bd" containerName="registry-server" containerID="cri-o://6c39932f97886e8da00bf37a3a85bc8841a49a2271d73d3c3f6d6fc85564150b" gracePeriod=2 Dec 
Dec 11 08:41:43 crc kubenswrapper[4881]: I1211 08:41:43.606275 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b","Type":"ContainerStarted","Data":"db3fbd048954f392125c26aaf6ca4615c6c6873297d0c73c18e6d3259a2ea1ed"}
Dec 11 08:41:43 crc kubenswrapper[4881]: I1211 08:41:43.848999 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.244:3000/\": dial tcp 10.217.0.244:3000: connect: connection refused"
Dec 11 08:41:44 crc kubenswrapper[4881]: I1211 08:41:44.619723 4881 generic.go:334] "Generic (PLEG): container finished" podID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerID="649bf814d8589ce30cf26c1a32e3bd9df9a66a3076ea58aa465831dfa51b1cc2" exitCode=0
Dec 11 08:41:44 crc kubenswrapper[4881]: I1211 08:41:44.619803 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c3f1515-b9d7-49cc-a96f-65e3713c3311","Type":"ContainerDied","Data":"649bf814d8589ce30cf26c1a32e3bd9df9a66a3076ea58aa465831dfa51b1cc2"}
Dec 11 08:41:44 crc kubenswrapper[4881]: I1211 08:41:44.622892 4881 generic.go:334] "Generic (PLEG): container finished" podID="755c1a62-7945-4edf-9e40-90d8abfea7bd" containerID="6c39932f97886e8da00bf37a3a85bc8841a49a2271d73d3c3f6d6fc85564150b" exitCode=0
Dec 11 08:41:44 crc kubenswrapper[4881]: I1211 08:41:44.622938 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-shvrh" event={"ID":"755c1a62-7945-4edf-9e40-90d8abfea7bd","Type":"ContainerDied","Data":"6c39932f97886e8da00bf37a3a85bc8841a49a2271d73d3c3f6d6fc85564150b"}
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.669647 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c3f1515-b9d7-49cc-a96f-65e3713c3311","Type":"ContainerDied","Data":"5ccd150953f734600d7c57127107a992c0c6d1660adf601d1eaea84519c3ac59"}
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.669977 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5ccd150953f734600d7c57127107a992c0c6d1660adf601d1eaea84519c3ac59"
Dec 11 08:41:45 crc kubenswrapper[4881]: E1211 08:41:45.791405 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Dec 11 08:41:45 crc kubenswrapper[4881]: E1211 08:41:45.794045 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Dec 11 08:41:45 crc kubenswrapper[4881]: E1211 08:41:45.798362 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Dec 11 08:41:45 crc kubenswrapper[4881]: E1211 08:41:45.798427 4881 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="e688f7d9-d215-48aa-8092-e176d3437f09" containerName="nova-scheduler-scheduler"
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.804742 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.815053 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-shvrh"
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.945316 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vt5s\" (UniqueName: \"kubernetes.io/projected/755c1a62-7945-4edf-9e40-90d8abfea7bd-kube-api-access-4vt5s\") pod \"755c1a62-7945-4edf-9e40-90d8abfea7bd\" (UID: \"755c1a62-7945-4edf-9e40-90d8abfea7bd\") "
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.945443 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/755c1a62-7945-4edf-9e40-90d8abfea7bd-catalog-content\") pod \"755c1a62-7945-4edf-9e40-90d8abfea7bd\" (UID: \"755c1a62-7945-4edf-9e40-90d8abfea7bd\") "
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.945525 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-combined-ca-bundle\") pod \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") "
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.945568 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-config-data\") pod \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") "
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.945620 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/755c1a62-7945-4edf-9e40-90d8abfea7bd-utilities\") pod \"755c1a62-7945-4edf-9e40-90d8abfea7bd\" (UID: \"755c1a62-7945-4edf-9e40-90d8abfea7bd\") "
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.945643 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5fv4\" (UniqueName: \"kubernetes.io/projected/2c3f1515-b9d7-49cc-a96f-65e3713c3311-kube-api-access-q5fv4\") pod \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") "
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.945726 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-scripts\") pod \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") "
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.945833 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c3f1515-b9d7-49cc-a96f-65e3713c3311-log-httpd\") pod \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") "
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.945896 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-sg-core-conf-yaml\") pod \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") "
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.945935 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c3f1515-b9d7-49cc-a96f-65e3713c3311-run-httpd\") pod \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") "
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.946026 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-ceilometer-tls-certs\") pod \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\" (UID: \"2c3f1515-b9d7-49cc-a96f-65e3713c3311\") "
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.946440 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/755c1a62-7945-4edf-9e40-90d8abfea7bd-utilities" (OuterVolumeSpecName: "utilities") pod "755c1a62-7945-4edf-9e40-90d8abfea7bd" (UID: "755c1a62-7945-4edf-9e40-90d8abfea7bd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.946715 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c3f1515-b9d7-49cc-a96f-65e3713c3311-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "2c3f1515-b9d7-49cc-a96f-65e3713c3311" (UID: "2c3f1515-b9d7-49cc-a96f-65e3713c3311"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.946834 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c3f1515-b9d7-49cc-a96f-65e3713c3311-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "2c3f1515-b9d7-49cc-a96f-65e3713c3311" (UID: "2c3f1515-b9d7-49cc-a96f-65e3713c3311"). InnerVolumeSpecName "log-httpd".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.947180 4881 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c3f1515-b9d7-49cc-a96f-65e3713c3311-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.947195 4881 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c3f1515-b9d7-49cc-a96f-65e3713c3311-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.947205 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/755c1a62-7945-4edf-9e40-90d8abfea7bd-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:45 crc kubenswrapper[4881]: I1211 08:41:45.969849 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/755c1a62-7945-4edf-9e40-90d8abfea7bd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "755c1a62-7945-4edf-9e40-90d8abfea7bd" (UID: "755c1a62-7945-4edf-9e40-90d8abfea7bd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.049294 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/755c1a62-7945-4edf-9e40-90d8abfea7bd-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.677844 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c3f1515-b9d7-49cc-a96f-65e3713c3311-kube-api-access-q5fv4" (OuterVolumeSpecName: "kube-api-access-q5fv4") pod "2c3f1515-b9d7-49cc-a96f-65e3713c3311" (UID: "2c3f1515-b9d7-49cc-a96f-65e3713c3311"). InnerVolumeSpecName "kube-api-access-q5fv4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.677973 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/755c1a62-7945-4edf-9e40-90d8abfea7bd-kube-api-access-4vt5s" (OuterVolumeSpecName: "kube-api-access-4vt5s") pod "755c1a62-7945-4edf-9e40-90d8abfea7bd" (UID: "755c1a62-7945-4edf-9e40-90d8abfea7bd"). InnerVolumeSpecName "kube-api-access-4vt5s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.681675 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-scripts" (OuterVolumeSpecName: "scripts") pod "2c3f1515-b9d7-49cc-a96f-65e3713c3311" (UID: "2c3f1515-b9d7-49cc-a96f-65e3713c3311"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.684058 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.684095 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-shvrh" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.684180 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-shvrh" event={"ID":"755c1a62-7945-4edf-9e40-90d8abfea7bd","Type":"ContainerDied","Data":"c83751848ff1fceb6be62d50ed6cd9e829f50fa38fb0b8dae5eb158042446f37"} Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.684250 4881 scope.go:117] "RemoveContainer" containerID="6c39932f97886e8da00bf37a3a85bc8841a49a2271d73d3c3f6d6fc85564150b" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.706999 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "2c3f1515-b9d7-49cc-a96f-65e3713c3311" (UID: "2c3f1515-b9d7-49cc-a96f-65e3713c3311"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.749132 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "2c3f1515-b9d7-49cc-a96f-65e3713c3311" (UID: "2c3f1515-b9d7-49cc-a96f-65e3713c3311"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.765503 4881 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.765536 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vt5s\" (UniqueName: \"kubernetes.io/projected/755c1a62-7945-4edf-9e40-90d8abfea7bd-kube-api-access-4vt5s\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.765547 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5fv4\" (UniqueName: \"kubernetes.io/projected/2c3f1515-b9d7-49cc-a96f-65e3713c3311-kube-api-access-q5fv4\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.765555 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.765567 4881 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.805148 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2c3f1515-b9d7-49cc-a96f-65e3713c3311" (UID: "2c3f1515-b9d7-49cc-a96f-65e3713c3311"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.845119 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-config-data" (OuterVolumeSpecName: "config-data") pod "2c3f1515-b9d7-49cc-a96f-65e3713c3311" (UID: "2c3f1515-b9d7-49cc-a96f-65e3713c3311"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.868267 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.868306 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c3f1515-b9d7-49cc-a96f-65e3713c3311-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.938137 4881 scope.go:117] "RemoveContainer" containerID="03c9954e0b751b9e83f89e61d1cca603165f6ae7d28ab9fae4631707e363808c" Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.942280 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-shvrh"] Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.954516 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-shvrh"] Dec 11 08:41:46 crc kubenswrapper[4881]: I1211 08:41:46.972760 4881 scope.go:117] "RemoveContainer" containerID="5a416b08953da58bcdbb0e85950b94c70408c486b8cd5d91092d40acf22156ef" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.043255 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="755c1a62-7945-4edf-9e40-90d8abfea7bd" path="/var/lib/kubelet/pods/755c1a62-7945-4edf-9e40-90d8abfea7bd/volumes" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.105194 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.121200 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.132704 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:41:47 crc kubenswrapper[4881]: E1211 08:41:47.133299 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerName="ceilometer-central-agent" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.133322 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerName="ceilometer-central-agent" Dec 11 08:41:47 crc kubenswrapper[4881]: E1211 08:41:47.133374 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerName="ceilometer-notification-agent" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.133382 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerName="ceilometer-notification-agent" Dec 11 08:41:47 crc kubenswrapper[4881]: E1211 08:41:47.133399 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="755c1a62-7945-4edf-9e40-90d8abfea7bd" containerName="registry-server" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.133406 4881 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="755c1a62-7945-4edf-9e40-90d8abfea7bd" containerName="registry-server" Dec 11 08:41:47 crc kubenswrapper[4881]: E1211 08:41:47.133417 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="755c1a62-7945-4edf-9e40-90d8abfea7bd" containerName="extract-content" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.133424 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="755c1a62-7945-4edf-9e40-90d8abfea7bd" containerName="extract-content" Dec 11 08:41:47 crc kubenswrapper[4881]: E1211 08:41:47.133436 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="755c1a62-7945-4edf-9e40-90d8abfea7bd" containerName="extract-utilities" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.133442 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="755c1a62-7945-4edf-9e40-90d8abfea7bd" containerName="extract-utilities" Dec 11 08:41:47 crc kubenswrapper[4881]: E1211 08:41:47.133452 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerName="sg-core" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.133459 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerName="sg-core" Dec 11 08:41:47 crc kubenswrapper[4881]: E1211 08:41:47.133473 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerName="proxy-httpd" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.133478 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerName="proxy-httpd" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.133703 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="755c1a62-7945-4edf-9e40-90d8abfea7bd" containerName="registry-server" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.133725 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerName="proxy-httpd" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.133745 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerName="sg-core" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.133769 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerName="ceilometer-notification-agent" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.133784 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" containerName="ceilometer-central-agent" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.136051 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.138636 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.138838 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.138962 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.147861 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.279707 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-scripts\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.279933 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-config-data\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.280129 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.280233 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-run-httpd\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.280440 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-log-httpd\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.280500 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcqgv\" (UniqueName: \"kubernetes.io/projected/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-kube-api-access-gcqgv\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.280801 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.281068 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.382708 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-log-httpd\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.382764 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcqgv\" (UniqueName: \"kubernetes.io/projected/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-kube-api-access-gcqgv\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.382811 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.382894 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.382956 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-scripts\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.382995 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-config-data\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.383041 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.383070 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-run-httpd\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.383099 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-log-httpd\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.384062 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-run-httpd\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.387915 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.387996 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-config-data\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.388430 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.389707 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-scripts\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: E1211 08:41:47.390155 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-c90ce7cd8fcf126d46d59245f06190daa1830091da52fd5b1af057889d7e2442.scope\": RecentStats: unable to find data in memory cache]" Dec 11 08:41:47 crc kubenswrapper[4881]: E1211 08:41:47.391951 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-c90ce7cd8fcf126d46d59245f06190daa1830091da52fd5b1af057889d7e2442.scope\": RecentStats: unable to find data in memory cache]" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.392551 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.403831 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcqgv\" (UniqueName: \"kubernetes.io/projected/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-kube-api-access-gcqgv\") pod \"ceilometer-0\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.454103 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.956103 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 11 08:41:47 crc kubenswrapper[4881]: I1211 08:41:47.956165 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 11 08:41:48 crc kubenswrapper[4881]: E1211 08:41:48.114719 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-c90ce7cd8fcf126d46d59245f06190daa1830091da52fd5b1af057889d7e2442.scope\": RecentStats: unable to find data in memory cache]" Dec 11 08:41:48 crc kubenswrapper[4881]: E1211 08:41:48.121454 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-c90ce7cd8fcf126d46d59245f06190daa1830091da52fd5b1af057889d7e2442.scope\": RecentStats: unable to find data in memory cache]" Dec 11 08:41:48 crc kubenswrapper[4881]: W1211 08:41:48.404232 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-f5990963d4fcf23f21125d59372f5889d4b3fc5f3aba9c025a9b632bfb6c4b20 WatchSource:0}: Error finding container f5990963d4fcf23f21125d59372f5889d4b3fc5f3aba9c025a9b632bfb6c4b20: Status 404 returned error can't find the container with id f5990963d4fcf23f21125d59372f5889d4b3fc5f3aba9c025a9b632bfb6c4b20 Dec 11 08:41:48 crc kubenswrapper[4881]: I1211 08:41:48.404586 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:41:48 crc kubenswrapper[4881]: I1211 08:41:48.700179 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 11 08:41:48 crc kubenswrapper[4881]: I1211 08:41:48.705806 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 11 08:41:48 crc kubenswrapper[4881]: I1211 08:41:48.706886 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 11 08:41:48 crc kubenswrapper[4881]: I1211 08:41:48.724937 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e223962-c0ce-4d1e-9db2-e0cd0dc70302","Type":"ContainerStarted","Data":"f5990963d4fcf23f21125d59372f5889d4b3fc5f3aba9c025a9b632bfb6c4b20"} Dec 11 08:41:48 crc kubenswrapper[4881]: I1211 08:41:48.732328 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b","Type":"ContainerStarted","Data":"bc909f046c8674c0430d07c127bd01edb69853e0e426736024aa90df3e3ef25f"} Dec 11 08:41:48 crc kubenswrapper[4881]: I1211 08:41:48.778684 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 11 08:41:49 crc kubenswrapper[4881]: I1211 08:41:49.033869 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c3f1515-b9d7-49cc-a96f-65e3713c3311" path="/var/lib/kubelet/pods/2c3f1515-b9d7-49cc-a96f-65e3713c3311/volumes" Dec 11 08:41:49 crc kubenswrapper[4881]: I1211 08:41:49.055552 4881 prober.go:107] "Probe failed" probeType="Startup" 
pod="openstack/nova-api-0" podUID="88610cc6-c8c8-47f2-8425-e5714d01f7e2" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.247:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 11 08:41:49 crc kubenswrapper[4881]: I1211 08:41:49.055874 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="88610cc6-c8c8-47f2-8425-e5714d01f7e2" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.247:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 11 08:41:49 crc kubenswrapper[4881]: I1211 08:41:49.744681 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e223962-c0ce-4d1e-9db2-e0cd0dc70302","Type":"ContainerStarted","Data":"8410ffae70b3e089d18fedbde8fab2cf53abf626438218ed6bc170d633d2c921"} Dec 11 08:41:50 crc kubenswrapper[4881]: E1211 08:41:50.789048 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:41:50 crc kubenswrapper[4881]: E1211 08:41:50.792527 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:41:50 crc kubenswrapper[4881]: E1211 08:41:50.794720 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:41:50 crc kubenswrapper[4881]: E1211 08:41:50.794894 4881 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="e688f7d9-d215-48aa-8092-e176d3437f09" containerName="nova-scheduler-scheduler" Dec 11 08:41:51 crc kubenswrapper[4881]: I1211 08:41:51.766576 4881 generic.go:334] "Generic (PLEG): container finished" podID="4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc" containerID="f97911d88b636654f9534271dbb427322400e2484df7a386c23013f0fa9bfd8a" exitCode=0 Dec 11 08:41:51 crc kubenswrapper[4881]: I1211 08:41:51.766678 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-dgdhn" event={"ID":"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc","Type":"ContainerDied","Data":"f97911d88b636654f9534271dbb427322400e2484df7a386c23013f0fa9bfd8a"} Dec 11 08:41:51 crc kubenswrapper[4881]: I1211 08:41:51.771350 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b","Type":"ContainerStarted","Data":"4cef02557414b487e8e8b0f835864abd3654785c1a571d517144633bc977824c"} Dec 11 08:41:51 crc kubenswrapper[4881]: I1211 08:41:51.771543 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" 
containerName="aodh-evaluator" containerID="cri-o://db3fbd048954f392125c26aaf6ca4615c6c6873297d0c73c18e6d3259a2ea1ed" gracePeriod=30 Dec 11 08:41:51 crc kubenswrapper[4881]: I1211 08:41:51.771541 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerName="aodh-notifier" containerID="cri-o://bc909f046c8674c0430d07c127bd01edb69853e0e426736024aa90df3e3ef25f" gracePeriod=30 Dec 11 08:41:51 crc kubenswrapper[4881]: I1211 08:41:51.771536 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerName="aodh-api" containerID="cri-o://30bcc67180507330b5527285e37e71256de00d00111147702b2ccdb179c0d95e" gracePeriod=30 Dec 11 08:41:51 crc kubenswrapper[4881]: I1211 08:41:51.771608 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerName="aodh-listener" containerID="cri-o://4cef02557414b487e8e8b0f835864abd3654785c1a571d517144633bc977824c" gracePeriod=30 Dec 11 08:41:51 crc kubenswrapper[4881]: I1211 08:41:51.776733 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e223962-c0ce-4d1e-9db2-e0cd0dc70302","Type":"ContainerStarted","Data":"8ba1420e2d2939d49603eda3c4c537666c793b4ee222c1bb4743410e6d7fcc39"} Dec 11 08:41:51 crc kubenswrapper[4881]: I1211 08:41:51.820749 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.044422504 podStartE2EDuration="17.820722296s" podCreationTimestamp="2025-12-11 08:41:34 +0000 UTC" firstStartedPulling="2025-12-11 08:41:35.499513965 +0000 UTC m=+1543.876882662" lastFinishedPulling="2025-12-11 08:41:51.275813757 +0000 UTC m=+1559.653182454" observedRunningTime="2025-12-11 08:41:51.810133429 +0000 UTC m=+1560.187502136" watchObservedRunningTime="2025-12-11 08:41:51.820722296 +0000 UTC m=+1560.198090993" Dec 11 08:41:52 crc kubenswrapper[4881]: I1211 08:41:52.517201 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vk695" Dec 11 08:41:52 crc kubenswrapper[4881]: I1211 08:41:52.589432 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vk695" Dec 11 08:41:52 crc kubenswrapper[4881]: I1211 08:41:52.765034 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vk695"] Dec 11 08:41:52 crc kubenswrapper[4881]: I1211 08:41:52.791956 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e223962-c0ce-4d1e-9db2-e0cd0dc70302","Type":"ContainerStarted","Data":"be33c703178f993b71dd30f771cd0f7d0cad4c706bbbefda1240186a83a72a92"} Dec 11 08:41:52 crc kubenswrapper[4881]: I1211 08:41:52.797519 4881 generic.go:334] "Generic (PLEG): container finished" podID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerID="bc909f046c8674c0430d07c127bd01edb69853e0e426736024aa90df3e3ef25f" exitCode=0 Dec 11 08:41:52 crc kubenswrapper[4881]: I1211 08:41:52.797556 4881 generic.go:334] "Generic (PLEG): container finished" podID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerID="db3fbd048954f392125c26aaf6ca4615c6c6873297d0c73c18e6d3259a2ea1ed" exitCode=0 Dec 11 08:41:52 crc kubenswrapper[4881]: I1211 08:41:52.797567 4881 generic.go:334] "Generic (PLEG): container finished" 
podID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerID="30bcc67180507330b5527285e37e71256de00d00111147702b2ccdb179c0d95e" exitCode=0 Dec 11 08:41:52 crc kubenswrapper[4881]: I1211 08:41:52.797800 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b","Type":"ContainerDied","Data":"bc909f046c8674c0430d07c127bd01edb69853e0e426736024aa90df3e3ef25f"} Dec 11 08:41:52 crc kubenswrapper[4881]: I1211 08:41:52.797832 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b","Type":"ContainerDied","Data":"db3fbd048954f392125c26aaf6ca4615c6c6873297d0c73c18e6d3259a2ea1ed"} Dec 11 08:41:52 crc kubenswrapper[4881]: I1211 08:41:52.797843 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b","Type":"ContainerDied","Data":"30bcc67180507330b5527285e37e71256de00d00111147702b2ccdb179c0d95e"} Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.231921 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-dgdhn" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.375692 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95ht9\" (UniqueName: \"kubernetes.io/projected/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-kube-api-access-95ht9\") pod \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\" (UID: \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\") " Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.375788 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-scripts\") pod \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\" (UID: \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\") " Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.376600 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-combined-ca-bundle\") pod \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\" (UID: \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\") " Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.376786 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-config-data\") pod \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\" (UID: \"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc\") " Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.382929 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-scripts" (OuterVolumeSpecName: "scripts") pod "4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc" (UID: "4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.384691 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-kube-api-access-95ht9" (OuterVolumeSpecName: "kube-api-access-95ht9") pod "4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc" (UID: "4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc"). InnerVolumeSpecName "kube-api-access-95ht9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.414412 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc" (UID: "4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.416793 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-config-data" (OuterVolumeSpecName: "config-data") pod "4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc" (UID: "4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.479736 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.479957 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-95ht9\" (UniqueName: \"kubernetes.io/projected/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-kube-api-access-95ht9\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.480042 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.480095 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.812304 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-dgdhn" event={"ID":"4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc","Type":"ContainerDied","Data":"f697e3148cabc05aacae526bee51af71f26a21fc511787e9d7e84e3da5b7998c"} Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.812654 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f697e3148cabc05aacae526bee51af71f26a21fc511787e9d7e84e3da5b7998c" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.812559 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vk695" podUID="324526c9-d17c-4dfd-b2f7-b86e9577c36c" containerName="registry-server" containerID="cri-o://d89c12eca30a4bde2f8bc067026e755b3972417eccab9ffdce126ecbbba33e88" gracePeriod=2 Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.812353 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-dgdhn" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.887629 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 11 08:41:53 crc kubenswrapper[4881]: E1211 08:41:53.888156 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc" containerName="nova-cell1-conductor-db-sync" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.888180 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc" containerName="nova-cell1-conductor-db-sync" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.888544 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc" containerName="nova-cell1-conductor-db-sync" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.889550 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.893775 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.924720 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.990824 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3\") " pod="openstack/nova-cell1-conductor-0" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.990903 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpzz6\" (UniqueName: \"kubernetes.io/projected/7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3-kube-api-access-xpzz6\") pod \"nova-cell1-conductor-0\" (UID: \"7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3\") " pod="openstack/nova-cell1-conductor-0" Dec 11 08:41:53 crc kubenswrapper[4881]: I1211 08:41:53.991079 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3\") " pod="openstack/nova-cell1-conductor-0" Dec 11 08:41:54 crc kubenswrapper[4881]: I1211 08:41:54.093476 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3\") " pod="openstack/nova-cell1-conductor-0" Dec 11 08:41:54 crc kubenswrapper[4881]: I1211 08:41:54.093563 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpzz6\" (UniqueName: \"kubernetes.io/projected/7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3-kube-api-access-xpzz6\") pod \"nova-cell1-conductor-0\" (UID: \"7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3\") " pod="openstack/nova-cell1-conductor-0" Dec 11 08:41:54 crc kubenswrapper[4881]: I1211 08:41:54.093718 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3\") " pod="openstack/nova-cell1-conductor-0" Dec 11 08:41:54 crc kubenswrapper[4881]: I1211 08:41:54.101376 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3\") " pod="openstack/nova-cell1-conductor-0" Dec 11 08:41:54 crc kubenswrapper[4881]: I1211 08:41:54.101437 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3\") " pod="openstack/nova-cell1-conductor-0" Dec 11 08:41:54 crc kubenswrapper[4881]: I1211 08:41:54.116883 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpzz6\" (UniqueName: \"kubernetes.io/projected/7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3-kube-api-access-xpzz6\") pod \"nova-cell1-conductor-0\" (UID: \"7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3\") " pod="openstack/nova-cell1-conductor-0" Dec 11 08:41:54 crc kubenswrapper[4881]: I1211 08:41:54.278887 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Dec 11 08:41:54 crc kubenswrapper[4881]: I1211 08:41:54.820773 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 11 08:41:54 crc kubenswrapper[4881]: I1211 08:41:54.830367 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3","Type":"ContainerStarted","Data":"243dce681a4749d1fc168c7bbc8dad25aeab34c43cf22270e7b13fd31cc1e814"} Dec 11 08:41:54 crc kubenswrapper[4881]: I1211 08:41:54.836632 4881 generic.go:334] "Generic (PLEG): container finished" podID="324526c9-d17c-4dfd-b2f7-b86e9577c36c" containerID="d89c12eca30a4bde2f8bc067026e755b3972417eccab9ffdce126ecbbba33e88" exitCode=0 Dec 11 08:41:54 crc kubenswrapper[4881]: I1211 08:41:54.836803 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vk695" event={"ID":"324526c9-d17c-4dfd-b2f7-b86e9577c36c","Type":"ContainerDied","Data":"d89c12eca30a4bde2f8bc067026e755b3972417eccab9ffdce126ecbbba33e88"} Dec 11 08:41:54 crc kubenswrapper[4881]: I1211 08:41:54.839913 4881 generic.go:334] "Generic (PLEG): container finished" podID="532f11a0-2d04-48b6-87a8-b27e99195ac9" containerID="eca5d6f7bfbb9f1cc81b72c1afde398cc730cee98865ca62bceac737883a8dd4" exitCode=137 Dec 11 08:41:54 crc kubenswrapper[4881]: I1211 08:41:54.839974 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"532f11a0-2d04-48b6-87a8-b27e99195ac9","Type":"ContainerDied","Data":"eca5d6f7bfbb9f1cc81b72c1afde398cc730cee98865ca62bceac737883a8dd4"} Dec 11 08:41:54 crc kubenswrapper[4881]: I1211 08:41:54.953197 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vk695" Dec 11 08:41:55 crc kubenswrapper[4881]: I1211 08:41:55.016472 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/324526c9-d17c-4dfd-b2f7-b86e9577c36c-catalog-content\") pod \"324526c9-d17c-4dfd-b2f7-b86e9577c36c\" (UID: \"324526c9-d17c-4dfd-b2f7-b86e9577c36c\") " Dec 11 08:41:55 crc kubenswrapper[4881]: I1211 08:41:55.016814 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/324526c9-d17c-4dfd-b2f7-b86e9577c36c-utilities\") pod \"324526c9-d17c-4dfd-b2f7-b86e9577c36c\" (UID: \"324526c9-d17c-4dfd-b2f7-b86e9577c36c\") " Dec 11 08:41:55 crc kubenswrapper[4881]: I1211 08:41:55.017039 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8k7d\" (UniqueName: \"kubernetes.io/projected/324526c9-d17c-4dfd-b2f7-b86e9577c36c-kube-api-access-p8k7d\") pod \"324526c9-d17c-4dfd-b2f7-b86e9577c36c\" (UID: \"324526c9-d17c-4dfd-b2f7-b86e9577c36c\") " Dec 11 08:41:55 crc kubenswrapper[4881]: I1211 08:41:55.018286 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/324526c9-d17c-4dfd-b2f7-b86e9577c36c-utilities" (OuterVolumeSpecName: "utilities") pod "324526c9-d17c-4dfd-b2f7-b86e9577c36c" (UID: "324526c9-d17c-4dfd-b2f7-b86e9577c36c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:41:55 crc kubenswrapper[4881]: I1211 08:41:55.025866 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/324526c9-d17c-4dfd-b2f7-b86e9577c36c-kube-api-access-p8k7d" (OuterVolumeSpecName: "kube-api-access-p8k7d") pod "324526c9-d17c-4dfd-b2f7-b86e9577c36c" (UID: "324526c9-d17c-4dfd-b2f7-b86e9577c36c"). InnerVolumeSpecName "kube-api-access-p8k7d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:41:55 crc kubenswrapper[4881]: I1211 08:41:55.120568 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/324526c9-d17c-4dfd-b2f7-b86e9577c36c-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:55 crc kubenswrapper[4881]: I1211 08:41:55.120609 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8k7d\" (UniqueName: \"kubernetes.io/projected/324526c9-d17c-4dfd-b2f7-b86e9577c36c-kube-api-access-p8k7d\") on node \"crc\" DevicePath \"\"" Dec 11 08:41:55 crc kubenswrapper[4881]: I1211 08:41:55.221136 4881 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podb6d5fc48-a707-4ab1-a8f9-392295486185"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podb6d5fc48-a707-4ab1-a8f9-392295486185] : Timed out while waiting for systemd to remove kubepods-besteffort-podb6d5fc48_a707_4ab1_a8f9_392295486185.slice" Dec 11 08:41:55 crc kubenswrapper[4881]: E1211 08:41:55.789728 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:41:55 crc kubenswrapper[4881]: E1211 08:41:55.791699 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:41:55 crc kubenswrapper[4881]: E1211 08:41:55.793559 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:41:55 crc kubenswrapper[4881]: E1211 08:41:55.793681 4881 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="e688f7d9-d215-48aa-8092-e176d3437f09" containerName="nova-scheduler-scheduler" Dec 11 08:41:55 crc kubenswrapper[4881]: I1211 08:41:55.854269 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vk695" event={"ID":"324526c9-d17c-4dfd-b2f7-b86e9577c36c","Type":"ContainerDied","Data":"1257b08e4188c1026c60fa90aa8dfd41d01ae08ea1530a7ae64a6c10d85fc1d4"} Dec 11 08:41:55 crc kubenswrapper[4881]: I1211 08:41:55.854350 4881 scope.go:117] "RemoveContainer" containerID="d89c12eca30a4bde2f8bc067026e755b3972417eccab9ffdce126ecbbba33e88" Dec 11 08:41:55 crc kubenswrapper[4881]: I1211 08:41:55.854376 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vk695" Dec 11 08:41:56 crc kubenswrapper[4881]: I1211 08:41:56.791939 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="70402eec-968d-4ceb-b259-5e2508ee21a0" containerName="galera" probeResult="failure" output="command timed out" Dec 11 08:41:56 crc kubenswrapper[4881]: I1211 08:41:56.792142 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="70402eec-968d-4ceb-b259-5e2508ee21a0" containerName="galera" probeResult="failure" output="command timed out" Dec 11 08:41:57 crc kubenswrapper[4881]: I1211 08:41:57.960682 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 11 08:41:57 crc kubenswrapper[4881]: I1211 08:41:57.961354 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 11 08:41:57 crc kubenswrapper[4881]: I1211 08:41:57.961456 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 11 08:41:57 crc kubenswrapper[4881]: I1211 08:41:57.968575 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 11 08:41:58 crc kubenswrapper[4881]: I1211 08:41:58.317006 4881 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-b52lg container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.69:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 11 08:41:58 crc kubenswrapper[4881]: I1211 08:41:58.317144 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b52lg" podUID="cc8120de-b56f-481f-9ac5-19235df13216" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.69:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 11 08:41:58 crc kubenswrapper[4881]: I1211 08:41:58.897374 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 11 08:41:58 crc kubenswrapper[4881]: I1211 08:41:58.902671 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 11 08:41:59 crc kubenswrapper[4881]: I1211 08:41:59.043896 4881 patch_prober.go:28] interesting pod/controller-manager-6d79d95bbf-rpz78 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.70:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 11 08:41:59 crc kubenswrapper[4881]: I1211 08:41:59.044047 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6d79d95bbf-rpz78" podUID="e5cf2194-857a-4f41-a925-3ee960c29134" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.70:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 11 08:42:00 crc kubenswrapper[4881]: E1211 08:42:00.787313 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = 
container is not created or running: checking if PID of e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee is running failed: container process not found" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:42:00 crc kubenswrapper[4881]: E1211 08:42:00.789392 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee is running failed: container process not found" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:42:00 crc kubenswrapper[4881]: E1211 08:42:00.789707 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee is running failed: container process not found" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:42:00 crc kubenswrapper[4881]: E1211 08:42:00.789772 4881 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="e688f7d9-d215-48aa-8092-e176d3437f09" containerName="nova-scheduler-scheduler" Dec 11 08:42:02 crc kubenswrapper[4881]: I1211 08:42:02.725517 4881 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 7.509054761s: [/var/lib/containers/storage/overlay/ef31178ee893ae933e059f001381f93fa811098f9c88c45946ade383247797c9/diff /var/log/pods/openshift-logging_logging-loki-gateway-5f65744c89-tvqmf_166ecd73-e9b9-4aa0-b09c-7ad373aea239/gateway/0.log]; will not log again for this container unless duration exceeds 2s Dec 11 08:42:02 crc kubenswrapper[4881]: I1211 08:42:02.840540 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/324526c9-d17c-4dfd-b2f7-b86e9577c36c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "324526c9-d17c-4dfd-b2f7-b86e9577c36c" (UID: "324526c9-d17c-4dfd-b2f7-b86e9577c36c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:42:02 crc kubenswrapper[4881]: I1211 08:42:02.920242 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/324526c9-d17c-4dfd-b2f7-b86e9577c36c-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:02 crc kubenswrapper[4881]: I1211 08:42:02.924785 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-gr9md"] Dec 11 08:42:02 crc kubenswrapper[4881]: E1211 08:42:02.925361 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="324526c9-d17c-4dfd-b2f7-b86e9577c36c" containerName="registry-server" Dec 11 08:42:02 crc kubenswrapper[4881]: I1211 08:42:02.925382 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="324526c9-d17c-4dfd-b2f7-b86e9577c36c" containerName="registry-server" Dec 11 08:42:02 crc kubenswrapper[4881]: E1211 08:42:02.925398 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="324526c9-d17c-4dfd-b2f7-b86e9577c36c" containerName="extract-utilities" Dec 11 08:42:02 crc kubenswrapper[4881]: I1211 08:42:02.925405 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="324526c9-d17c-4dfd-b2f7-b86e9577c36c" containerName="extract-utilities" Dec 11 08:42:02 crc kubenswrapper[4881]: E1211 08:42:02.925453 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="324526c9-d17c-4dfd-b2f7-b86e9577c36c" containerName="extract-content" Dec 11 08:42:02 crc kubenswrapper[4881]: I1211 08:42:02.925464 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="324526c9-d17c-4dfd-b2f7-b86e9577c36c" containerName="extract-content" Dec 11 08:42:02 crc kubenswrapper[4881]: I1211 08:42:02.925713 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="324526c9-d17c-4dfd-b2f7-b86e9577c36c" containerName="registry-server" Dec 11 08:42:02 crc kubenswrapper[4881]: I1211 08:42:02.927985 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:02 crc kubenswrapper[4881]: I1211 08:42:02.981573 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-gr9md"] Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.012895 4881 generic.go:334] "Generic (PLEG): container finished" podID="e688f7d9-d215-48aa-8092-e176d3437f09" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" exitCode=137 Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.021753 4881 scope.go:117] "RemoveContainer" containerID="a957d9ac23a8d9794d75049cfcd6cd927636c635e317f85cbdb77490a1f2eb9b" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.022129 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-config\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.022242 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2d4cp\" (UniqueName: \"kubernetes.io/projected/f2376e63-11dc-4d35-be7c-eff1af8f8534-kube-api-access-2d4cp\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.022322 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.022364 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.022406 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.022440 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.026197 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e688f7d9-d215-48aa-8092-e176d3437f09","Type":"ContainerDied","Data":"e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee"} Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.137097 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.137163 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.137196 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.137223 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.137410 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-config\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.137592 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2d4cp\" (UniqueName: \"kubernetes.io/projected/f2376e63-11dc-4d35-be7c-eff1af8f8534-kube-api-access-2d4cp\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.143683 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.144134 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.144391 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.144451 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-config\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.145216 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.245273 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2d4cp\" (UniqueName: \"kubernetes.io/projected/f2376e63-11dc-4d35-be7c-eff1af8f8534-kube-api-access-2d4cp\") pod \"dnsmasq-dns-79b5d74c8c-gr9md\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.255524 4881 scope.go:117] "RemoveContainer" containerID="c90ce7cd8fcf126d46d59245f06190daa1830091da52fd5b1af057889d7e2442" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.315990 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.393839 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vk695"] Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.415873 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vk695"] Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.536838 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.662692 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/532f11a0-2d04-48b6-87a8-b27e99195ac9-config-data\") pod \"532f11a0-2d04-48b6-87a8-b27e99195ac9\" (UID: \"532f11a0-2d04-48b6-87a8-b27e99195ac9\") " Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.663075 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-824fx\" (UniqueName: \"kubernetes.io/projected/532f11a0-2d04-48b6-87a8-b27e99195ac9-kube-api-access-824fx\") pod \"532f11a0-2d04-48b6-87a8-b27e99195ac9\" (UID: \"532f11a0-2d04-48b6-87a8-b27e99195ac9\") " Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.663387 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/532f11a0-2d04-48b6-87a8-b27e99195ac9-combined-ca-bundle\") pod \"532f11a0-2d04-48b6-87a8-b27e99195ac9\" (UID: \"532f11a0-2d04-48b6-87a8-b27e99195ac9\") " Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.695899 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/532f11a0-2d04-48b6-87a8-b27e99195ac9-kube-api-access-824fx" (OuterVolumeSpecName: "kube-api-access-824fx") pod "532f11a0-2d04-48b6-87a8-b27e99195ac9" (UID: "532f11a0-2d04-48b6-87a8-b27e99195ac9"). InnerVolumeSpecName "kube-api-access-824fx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.766211 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-824fx\" (UniqueName: \"kubernetes.io/projected/532f11a0-2d04-48b6-87a8-b27e99195ac9-kube-api-access-824fx\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.782503 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/532f11a0-2d04-48b6-87a8-b27e99195ac9-config-data" (OuterVolumeSpecName: "config-data") pod "532f11a0-2d04-48b6-87a8-b27e99195ac9" (UID: "532f11a0-2d04-48b6-87a8-b27e99195ac9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.820126 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/532f11a0-2d04-48b6-87a8-b27e99195ac9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "532f11a0-2d04-48b6-87a8-b27e99195ac9" (UID: "532f11a0-2d04-48b6-87a8-b27e99195ac9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.869171 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/532f11a0-2d04-48b6-87a8-b27e99195ac9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.869202 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/532f11a0-2d04-48b6-87a8-b27e99195ac9-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.880626 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.970208 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e688f7d9-d215-48aa-8092-e176d3437f09-combined-ca-bundle\") pod \"e688f7d9-d215-48aa-8092-e176d3437f09\" (UID: \"e688f7d9-d215-48aa-8092-e176d3437f09\") " Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.970674 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e688f7d9-d215-48aa-8092-e176d3437f09-config-data\") pod \"e688f7d9-d215-48aa-8092-e176d3437f09\" (UID: \"e688f7d9-d215-48aa-8092-e176d3437f09\") " Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.970744 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-59s7m\" (UniqueName: \"kubernetes.io/projected/e688f7d9-d215-48aa-8092-e176d3437f09-kube-api-access-59s7m\") pod \"e688f7d9-d215-48aa-8092-e176d3437f09\" (UID: \"e688f7d9-d215-48aa-8092-e176d3437f09\") " Dec 11 08:42:03 crc kubenswrapper[4881]: I1211 08:42:03.981437 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e688f7d9-d215-48aa-8092-e176d3437f09-kube-api-access-59s7m" (OuterVolumeSpecName: "kube-api-access-59s7m") pod "e688f7d9-d215-48aa-8092-e176d3437f09" (UID: "e688f7d9-d215-48aa-8092-e176d3437f09"). InnerVolumeSpecName "kube-api-access-59s7m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.022288 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e688f7d9-d215-48aa-8092-e176d3437f09-config-data" (OuterVolumeSpecName: "config-data") pod "e688f7d9-d215-48aa-8092-e176d3437f09" (UID: "e688f7d9-d215-48aa-8092-e176d3437f09"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.053452 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e688f7d9-d215-48aa-8092-e176d3437f09-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e688f7d9-d215-48aa-8092-e176d3437f09" (UID: "e688f7d9-d215-48aa-8092-e176d3437f09"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.059504 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"532f11a0-2d04-48b6-87a8-b27e99195ac9","Type":"ContainerDied","Data":"6b27f9a88ba0e9df7605f0240965774967a307b37e86a1c9c2a9d43264bb5042"} Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.059718 4881 scope.go:117] "RemoveContainer" containerID="eca5d6f7bfbb9f1cc81b72c1afde398cc730cee98865ca62bceac737883a8dd4" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.059966 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.075713 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e223962-c0ce-4d1e-9db2-e0cd0dc70302","Type":"ContainerStarted","Data":"72008dd43f0222dde14415321800759fff9d3e325cb59e9576b9cfc2182224cc"} Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.076061 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.077455 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e688f7d9-d215-48aa-8092-e176d3437f09-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.077487 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e688f7d9-d215-48aa-8092-e176d3437f09-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.077500 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-59s7m\" (UniqueName: \"kubernetes.io/projected/e688f7d9-d215-48aa-8092-e176d3437f09-kube-api-access-59s7m\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.101142 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.101202 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e688f7d9-d215-48aa-8092-e176d3437f09","Type":"ContainerDied","Data":"5212e12db9086955469cdaeb40997be0eaac6cab01484238de7fe94120e16dfa"} Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.118545 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3","Type":"ContainerStarted","Data":"6b76baab3987bc5d56ccd94316c079f736af8541900fdd2a6d2836255abbf72d"} Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.120979 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.151714 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.120142154 podStartE2EDuration="17.151690486s" podCreationTimestamp="2025-12-11 08:41:47 +0000 UTC" firstStartedPulling="2025-12-11 08:41:48.406569105 +0000 UTC m=+1556.783937842" lastFinishedPulling="2025-12-11 08:42:03.438117477 +0000 UTC m=+1571.815486174" observedRunningTime="2025-12-11 08:42:04.122868194 +0000 UTC m=+1572.500236891" watchObservedRunningTime="2025-12-11 08:42:04.151690486 +0000 UTC m=+1572.529059183" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.206812 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-gr9md"] Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.243413 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=11.243391085 podStartE2EDuration="11.243391085s" podCreationTimestamp="2025-12-11 08:41:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:42:04.157613763 +0000 UTC m=+1572.534982470" watchObservedRunningTime="2025-12-11 08:42:04.243391085 +0000 UTC m=+1572.620759792" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.498595 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.518166 4881 scope.go:117] "RemoveContainer" containerID="e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.548767 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.591700 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.609006 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.629031 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 11 08:42:04 crc kubenswrapper[4881]: E1211 08:42:04.629812 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="532f11a0-2d04-48b6-87a8-b27e99195ac9" containerName="nova-cell1-novncproxy-novncproxy" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.629838 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="532f11a0-2d04-48b6-87a8-b27e99195ac9" 
containerName="nova-cell1-novncproxy-novncproxy" Dec 11 08:42:04 crc kubenswrapper[4881]: E1211 08:42:04.629867 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e688f7d9-d215-48aa-8092-e176d3437f09" containerName="nova-scheduler-scheduler" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.629876 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e688f7d9-d215-48aa-8092-e176d3437f09" containerName="nova-scheduler-scheduler" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.630176 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="e688f7d9-d215-48aa-8092-e176d3437f09" containerName="nova-scheduler-scheduler" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.630227 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="532f11a0-2d04-48b6-87a8-b27e99195ac9" containerName="nova-cell1-novncproxy-novncproxy" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.635911 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.638644 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.642113 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.642751 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.651696 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.653367 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.659855 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.669985 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.696540 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.730442 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41b377bb-0ea7-45df-be96-b7f03c9ff994-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"41b377bb-0ea7-45df-be96-b7f03c9ff994\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.730497 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.730540 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7c4s\" (UniqueName: \"kubernetes.io/projected/bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9-kube-api-access-r7c4s\") pod \"nova-cell1-novncproxy-0\" (UID: \"bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.730597 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.730820 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8w4vr\" (UniqueName: \"kubernetes.io/projected/41b377bb-0ea7-45df-be96-b7f03c9ff994-kube-api-access-8w4vr\") pod \"nova-scheduler-0\" (UID: \"41b377bb-0ea7-45df-be96-b7f03c9ff994\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.730875 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.730939 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41b377bb-0ea7-45df-be96-b7f03c9ff994-config-data\") pod \"nova-scheduler-0\" (UID: \"41b377bb-0ea7-45df-be96-b7f03c9ff994\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.731017 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.833555 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8w4vr\" (UniqueName: \"kubernetes.io/projected/41b377bb-0ea7-45df-be96-b7f03c9ff994-kube-api-access-8w4vr\") pod \"nova-scheduler-0\" (UID: \"41b377bb-0ea7-45df-be96-b7f03c9ff994\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.833637 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.833697 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41b377bb-0ea7-45df-be96-b7f03c9ff994-config-data\") pod \"nova-scheduler-0\" (UID: \"41b377bb-0ea7-45df-be96-b7f03c9ff994\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.833790 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.833866 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41b377bb-0ea7-45df-be96-b7f03c9ff994-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"41b377bb-0ea7-45df-be96-b7f03c9ff994\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.833905 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.833963 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7c4s\" (UniqueName: \"kubernetes.io/projected/bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9-kube-api-access-r7c4s\") pod \"nova-cell1-novncproxy-0\" (UID: \"bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.833991 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.841164 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41b377bb-0ea7-45df-be96-b7f03c9ff994-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: 
\"41b377bb-0ea7-45df-be96-b7f03c9ff994\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.841865 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.842104 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.844440 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41b377bb-0ea7-45df-be96-b7f03c9ff994-config-data\") pod \"nova-scheduler-0\" (UID: \"41b377bb-0ea7-45df-be96-b7f03c9ff994\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.845557 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.849060 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.854505 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8w4vr\" (UniqueName: \"kubernetes.io/projected/41b377bb-0ea7-45df-be96-b7f03c9ff994-kube-api-access-8w4vr\") pod \"nova-scheduler-0\" (UID: \"41b377bb-0ea7-45df-be96-b7f03c9ff994\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.857025 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7c4s\" (UniqueName: \"kubernetes.io/projected/bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9-kube-api-access-r7c4s\") pod \"nova-cell1-novncproxy-0\" (UID: \"bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.982694 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:04 crc kubenswrapper[4881]: I1211 08:42:04.998293 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 11 08:42:05 crc kubenswrapper[4881]: I1211 08:42:05.020610 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="324526c9-d17c-4dfd-b2f7-b86e9577c36c" path="/var/lib/kubelet/pods/324526c9-d17c-4dfd-b2f7-b86e9577c36c/volumes" Dec 11 08:42:05 crc kubenswrapper[4881]: I1211 08:42:05.021943 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="532f11a0-2d04-48b6-87a8-b27e99195ac9" path="/var/lib/kubelet/pods/532f11a0-2d04-48b6-87a8-b27e99195ac9/volumes" Dec 11 08:42:05 crc kubenswrapper[4881]: I1211 08:42:05.022720 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e688f7d9-d215-48aa-8092-e176d3437f09" path="/var/lib/kubelet/pods/e688f7d9-d215-48aa-8092-e176d3437f09/volumes" Dec 11 08:42:05 crc kubenswrapper[4881]: I1211 08:42:05.182868 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" event={"ID":"f2376e63-11dc-4d35-be7c-eff1af8f8534","Type":"ContainerStarted","Data":"77ede655a7ec5d1d6a98f2caafcbf372c4edfcb1285571928b8721f6f0923062"} Dec 11 08:42:05 crc kubenswrapper[4881]: I1211 08:42:05.561403 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 11 08:42:05 crc kubenswrapper[4881]: I1211 08:42:05.641897 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 08:42:05 crc kubenswrapper[4881]: I1211 08:42:05.911159 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 11 08:42:05 crc kubenswrapper[4881]: I1211 08:42:05.911695 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="88610cc6-c8c8-47f2-8425-e5714d01f7e2" containerName="nova-api-log" containerID="cri-o://447f6eddf46f057e9c04d1f3c4088a6a0ab0ac83f446c22b24457f654a7e5877" gracePeriod=30 Dec 11 08:42:05 crc kubenswrapper[4881]: I1211 08:42:05.911783 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="88610cc6-c8c8-47f2-8425-e5714d01f7e2" containerName="nova-api-api" containerID="cri-o://5547df66d0502b39203818a66c5451b5ab55b55be882862677c10c5d499b1947" gracePeriod=30 Dec 11 08:42:06 crc kubenswrapper[4881]: I1211 08:42:06.197380 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"41b377bb-0ea7-45df-be96-b7f03c9ff994","Type":"ContainerStarted","Data":"415a1f43316e7762427f0ceaf8edc187468dfefd1369de758d956ac0c00f19ff"} Dec 11 08:42:06 crc kubenswrapper[4881]: I1211 08:42:06.199394 4881 generic.go:334] "Generic (PLEG): container finished" podID="f2376e63-11dc-4d35-be7c-eff1af8f8534" containerID="7b28f08e70b3b16c1b082a8725e98fb5b980250d5cb2fa5b93670a6fd2ba9b15" exitCode=0 Dec 11 08:42:06 crc kubenswrapper[4881]: I1211 08:42:06.199525 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" event={"ID":"f2376e63-11dc-4d35-be7c-eff1af8f8534","Type":"ContainerDied","Data":"7b28f08e70b3b16c1b082a8725e98fb5b980250d5cb2fa5b93670a6fd2ba9b15"} Dec 11 08:42:06 crc kubenswrapper[4881]: I1211 08:42:06.203031 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9","Type":"ContainerStarted","Data":"f88b7d83f0ebd80e699596aa3cb7db9a0488f45cc27743cfff7a33b9dcf4a84a"} Dec 11 08:42:07 crc kubenswrapper[4881]: I1211 08:42:07.228671 4881 generic.go:334] "Generic (PLEG): 
container finished" podID="88610cc6-c8c8-47f2-8425-e5714d01f7e2" containerID="447f6eddf46f057e9c04d1f3c4088a6a0ab0ac83f446c22b24457f654a7e5877" exitCode=143 Dec 11 08:42:07 crc kubenswrapper[4881]: I1211 08:42:07.228889 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"88610cc6-c8c8-47f2-8425-e5714d01f7e2","Type":"ContainerDied","Data":"447f6eddf46f057e9c04d1f3c4088a6a0ab0ac83f446c22b24457f654a7e5877"} Dec 11 08:42:07 crc kubenswrapper[4881]: I1211 08:42:07.244671 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9","Type":"ContainerStarted","Data":"57c428857cc15a0a424476ec4138890216ec546c81a1451499a90790d94f482c"} Dec 11 08:42:07 crc kubenswrapper[4881]: I1211 08:42:07.246897 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"41b377bb-0ea7-45df-be96-b7f03c9ff994","Type":"ContainerStarted","Data":"e6dabacf3bbbc2a7fb8d5aaae341f17129a2707c1d2c810e62a19bc09c0885ef"} Dec 11 08:42:07 crc kubenswrapper[4881]: I1211 08:42:07.258671 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" event={"ID":"f2376e63-11dc-4d35-be7c-eff1af8f8534","Type":"ContainerStarted","Data":"c9cdc438b161a719a0ae7bd63f2b369f4154f6de71f70fb5542982d0dac402e1"} Dec 11 08:42:07 crc kubenswrapper[4881]: I1211 08:42:07.280899 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.280876963 podStartE2EDuration="3.280876963s" podCreationTimestamp="2025-12-11 08:42:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:42:07.266087181 +0000 UTC m=+1575.643455878" watchObservedRunningTime="2025-12-11 08:42:07.280876963 +0000 UTC m=+1575.658245660" Dec 11 08:42:07 crc kubenswrapper[4881]: I1211 08:42:07.534879 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:42:07 crc kubenswrapper[4881]: I1211 08:42:07.536251 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerName="sg-core" containerID="cri-o://be33c703178f993b71dd30f771cd0f7d0cad4c706bbbefda1240186a83a72a92" gracePeriod=30 Dec 11 08:42:07 crc kubenswrapper[4881]: I1211 08:42:07.536294 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerName="ceilometer-notification-agent" containerID="cri-o://8ba1420e2d2939d49603eda3c4c537666c793b4ee222c1bb4743410e6d7fcc39" gracePeriod=30 Dec 11 08:42:07 crc kubenswrapper[4881]: I1211 08:42:07.536240 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerName="proxy-httpd" containerID="cri-o://72008dd43f0222dde14415321800759fff9d3e325cb59e9576b9cfc2182224cc" gracePeriod=30 Dec 11 08:42:07 crc kubenswrapper[4881]: I1211 08:42:07.536107 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerName="ceilometer-central-agent" containerID="cri-o://8410ffae70b3e089d18fedbde8fab2cf53abf626438218ed6bc170d633d2c921" gracePeriod=30 Dec 11 08:42:08 crc kubenswrapper[4881]: I1211 08:42:08.276148 4881 
generic.go:334] "Generic (PLEG): container finished" podID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerID="72008dd43f0222dde14415321800759fff9d3e325cb59e9576b9cfc2182224cc" exitCode=0 Dec 11 08:42:08 crc kubenswrapper[4881]: I1211 08:42:08.276181 4881 generic.go:334] "Generic (PLEG): container finished" podID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerID="be33c703178f993b71dd30f771cd0f7d0cad4c706bbbefda1240186a83a72a92" exitCode=2 Dec 11 08:42:08 crc kubenswrapper[4881]: I1211 08:42:08.277833 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e223962-c0ce-4d1e-9db2-e0cd0dc70302","Type":"ContainerDied","Data":"72008dd43f0222dde14415321800759fff9d3e325cb59e9576b9cfc2182224cc"} Dec 11 08:42:08 crc kubenswrapper[4881]: I1211 08:42:08.277869 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e223962-c0ce-4d1e-9db2-e0cd0dc70302","Type":"ContainerDied","Data":"be33c703178f993b71dd30f771cd0f7d0cad4c706bbbefda1240186a83a72a92"} Dec 11 08:42:08 crc kubenswrapper[4881]: I1211 08:42:08.278102 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:08 crc kubenswrapper[4881]: I1211 08:42:08.303936 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=4.3039175069999995 podStartE2EDuration="4.303917507s" podCreationTimestamp="2025-12-11 08:42:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:42:08.292677001 +0000 UTC m=+1576.670045718" watchObservedRunningTime="2025-12-11 08:42:08.303917507 +0000 UTC m=+1576.681286204" Dec 11 08:42:08 crc kubenswrapper[4881]: I1211 08:42:08.323078 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" podStartSLOduration=6.323056189 podStartE2EDuration="6.323056189s" podCreationTimestamp="2025-12-11 08:42:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:42:08.313489043 +0000 UTC m=+1576.690857750" watchObservedRunningTime="2025-12-11 08:42:08.323056189 +0000 UTC m=+1576.700424886" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.059231 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-api-0" podUID="88610cc6-c8c8-47f2-8425-e5714d01f7e2" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.247:8774/\": read tcp 10.217.0.2:41750->10.217.0.247:8774: read: connection reset by peer" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.059399 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-api-0" podUID="88610cc6-c8c8-47f2-8425-e5714d01f7e2" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.247:8774/\": read tcp 10.217.0.2:41734->10.217.0.247:8774: read: connection reset by peer" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.302166 4881 generic.go:334] "Generic (PLEG): container finished" podID="88610cc6-c8c8-47f2-8425-e5714d01f7e2" containerID="5547df66d0502b39203818a66c5451b5ab55b55be882862677c10c5d499b1947" exitCode=0 Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.302253 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"88610cc6-c8c8-47f2-8425-e5714d01f7e2","Type":"ContainerDied","Data":"5547df66d0502b39203818a66c5451b5ab55b55be882862677c10c5d499b1947"} Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.319691 4881 generic.go:334] "Generic (PLEG): container finished" podID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerID="8ba1420e2d2939d49603eda3c4c537666c793b4ee222c1bb4743410e6d7fcc39" exitCode=0 Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.319758 4881 generic.go:334] "Generic (PLEG): container finished" podID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerID="8410ffae70b3e089d18fedbde8fab2cf53abf626438218ed6bc170d633d2c921" exitCode=0 Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.320187 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e223962-c0ce-4d1e-9db2-e0cd0dc70302","Type":"ContainerDied","Data":"8ba1420e2d2939d49603eda3c4c537666c793b4ee222c1bb4743410e6d7fcc39"} Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.320239 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e223962-c0ce-4d1e-9db2-e0cd0dc70302","Type":"ContainerDied","Data":"8410ffae70b3e089d18fedbde8fab2cf53abf626438218ed6bc170d633d2c921"} Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.356091 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.606175 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.667216 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-run-httpd\") pod \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.667380 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-sg-core-conf-yaml\") pod \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.667420 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gcqgv\" (UniqueName: \"kubernetes.io/projected/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-kube-api-access-gcqgv\") pod \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.667506 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-scripts\") pod \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.667531 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1e223962-c0ce-4d1e-9db2-e0cd0dc70302" (UID: "1e223962-c0ce-4d1e-9db2-e0cd0dc70302"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.667586 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-combined-ca-bundle\") pod \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.667607 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-ceilometer-tls-certs\") pod \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.667656 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-log-httpd\") pod \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.667688 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-config-data\") pod \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\" (UID: \"1e223962-c0ce-4d1e-9db2-e0cd0dc70302\") " Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.668171 4881 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.671767 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1e223962-c0ce-4d1e-9db2-e0cd0dc70302" (UID: "1e223962-c0ce-4d1e-9db2-e0cd0dc70302"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.686660 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-scripts" (OuterVolumeSpecName: "scripts") pod "1e223962-c0ce-4d1e-9db2-e0cd0dc70302" (UID: "1e223962-c0ce-4d1e-9db2-e0cd0dc70302"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.686902 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-kube-api-access-gcqgv" (OuterVolumeSpecName: "kube-api-access-gcqgv") pod "1e223962-c0ce-4d1e-9db2-e0cd0dc70302" (UID: "1e223962-c0ce-4d1e-9db2-e0cd0dc70302"). InnerVolumeSpecName "kube-api-access-gcqgv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.722309 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1e223962-c0ce-4d1e-9db2-e0cd0dc70302" (UID: "1e223962-c0ce-4d1e-9db2-e0cd0dc70302"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.756904 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "1e223962-c0ce-4d1e-9db2-e0cd0dc70302" (UID: "1e223962-c0ce-4d1e-9db2-e0cd0dc70302"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.770521 4881 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.770558 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gcqgv\" (UniqueName: \"kubernetes.io/projected/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-kube-api-access-gcqgv\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.770575 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.770591 4881 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.770606 4881 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.790430 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1e223962-c0ce-4d1e-9db2-e0cd0dc70302" (UID: "1e223962-c0ce-4d1e-9db2-e0cd0dc70302"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.791958 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.872487 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88610cc6-c8c8-47f2-8425-e5714d01f7e2-combined-ca-bundle\") pod \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\" (UID: \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\") " Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.872635 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fbrtf\" (UniqueName: \"kubernetes.io/projected/88610cc6-c8c8-47f2-8425-e5714d01f7e2-kube-api-access-fbrtf\") pod \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\" (UID: \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\") " Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.872789 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88610cc6-c8c8-47f2-8425-e5714d01f7e2-config-data\") pod \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\" (UID: \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\") " Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.872943 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88610cc6-c8c8-47f2-8425-e5714d01f7e2-logs\") pod \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\" (UID: \"88610cc6-c8c8-47f2-8425-e5714d01f7e2\") " Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.873614 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.874789 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88610cc6-c8c8-47f2-8425-e5714d01f7e2-logs" (OuterVolumeSpecName: "logs") pod "88610cc6-c8c8-47f2-8425-e5714d01f7e2" (UID: "88610cc6-c8c8-47f2-8425-e5714d01f7e2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.875761 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-config-data" (OuterVolumeSpecName: "config-data") pod "1e223962-c0ce-4d1e-9db2-e0cd0dc70302" (UID: "1e223962-c0ce-4d1e-9db2-e0cd0dc70302"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.879622 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88610cc6-c8c8-47f2-8425-e5714d01f7e2-kube-api-access-fbrtf" (OuterVolumeSpecName: "kube-api-access-fbrtf") pod "88610cc6-c8c8-47f2-8425-e5714d01f7e2" (UID: "88610cc6-c8c8-47f2-8425-e5714d01f7e2"). InnerVolumeSpecName "kube-api-access-fbrtf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.904564 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88610cc6-c8c8-47f2-8425-e5714d01f7e2-config-data" (OuterVolumeSpecName: "config-data") pod "88610cc6-c8c8-47f2-8425-e5714d01f7e2" (UID: "88610cc6-c8c8-47f2-8425-e5714d01f7e2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.917903 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88610cc6-c8c8-47f2-8425-e5714d01f7e2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "88610cc6-c8c8-47f2-8425-e5714d01f7e2" (UID: "88610cc6-c8c8-47f2-8425-e5714d01f7e2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.975910 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fbrtf\" (UniqueName: \"kubernetes.io/projected/88610cc6-c8c8-47f2-8425-e5714d01f7e2-kube-api-access-fbrtf\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.975961 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88610cc6-c8c8-47f2-8425-e5714d01f7e2-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.975976 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e223962-c0ce-4d1e-9db2-e0cd0dc70302-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.975986 4881 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88610cc6-c8c8-47f2-8425-e5714d01f7e2-logs\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.975999 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88610cc6-c8c8-47f2-8425-e5714d01f7e2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:09 crc kubenswrapper[4881]: I1211 08:42:09.983607 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.004804 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.331231 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.331225 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"88610cc6-c8c8-47f2-8425-e5714d01f7e2","Type":"ContainerDied","Data":"70ce343a0acdb45a979305c0074ec869eff0d47c50e1bc2c512c4c67777f1adb"} Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.331390 4881 scope.go:117] "RemoveContainer" containerID="5547df66d0502b39203818a66c5451b5ab55b55be882862677c10c5d499b1947" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.336039 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e223962-c0ce-4d1e-9db2-e0cd0dc70302","Type":"ContainerDied","Data":"f5990963d4fcf23f21125d59372f5889d4b3fc5f3aba9c025a9b632bfb6c4b20"} Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.336081 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.384871 4881 scope.go:117] "RemoveContainer" containerID="447f6eddf46f057e9c04d1f3c4088a6a0ab0ac83f446c22b24457f654a7e5877" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.420589 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.456562 4881 scope.go:117] "RemoveContainer" containerID="72008dd43f0222dde14415321800759fff9d3e325cb59e9576b9cfc2182224cc" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.484089 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.490585 4881 scope.go:117] "RemoveContainer" containerID="be33c703178f993b71dd30f771cd0f7d0cad4c706bbbefda1240186a83a72a92" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.501487 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.517624 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.517885 4881 scope.go:117] "RemoveContainer" containerID="8ba1420e2d2939d49603eda3c4c537666c793b4ee222c1bb4743410e6d7fcc39" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.536764 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 11 08:42:10 crc kubenswrapper[4881]: E1211 08:42:10.537445 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerName="ceilometer-notification-agent" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.537460 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerName="ceilometer-notification-agent" Dec 11 08:42:10 crc kubenswrapper[4881]: E1211 08:42:10.537493 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerName="ceilometer-central-agent" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.537500 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerName="ceilometer-central-agent" Dec 11 08:42:10 crc kubenswrapper[4881]: E1211 08:42:10.537545 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88610cc6-c8c8-47f2-8425-e5714d01f7e2" containerName="nova-api-log" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.537553 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="88610cc6-c8c8-47f2-8425-e5714d01f7e2" containerName="nova-api-log" Dec 11 08:42:10 crc kubenswrapper[4881]: E1211 08:42:10.537566 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88610cc6-c8c8-47f2-8425-e5714d01f7e2" containerName="nova-api-api" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.537573 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="88610cc6-c8c8-47f2-8425-e5714d01f7e2" containerName="nova-api-api" Dec 11 08:42:10 crc kubenswrapper[4881]: E1211 08:42:10.537592 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerName="sg-core" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.537599 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerName="sg-core" Dec 11 08:42:10 crc kubenswrapper[4881]: E1211 
08:42:10.537616 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerName="proxy-httpd" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.537625 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerName="proxy-httpd" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.539573 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerName="proxy-httpd" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.539612 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerName="sg-core" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.539644 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerName="ceilometer-notification-agent" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.539656 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" containerName="ceilometer-central-agent" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.539676 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="88610cc6-c8c8-47f2-8425-e5714d01f7e2" containerName="nova-api-api" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.539691 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="88610cc6-c8c8-47f2-8425-e5714d01f7e2" containerName="nova-api-log" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.541699 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.544046 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.544387 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.544563 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.547670 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.551388 4881 util.go:30] "No sandbox for pod can be found. 
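
The RemoveStaleState and "Deleted CPUSet assignment" records fire because nova-api-0 and ceilometer-0 were recreated under new UIDs, so the cpu and memory managers still held per-container assignments for the old UIDs and drop them before admitting the new pods. A simplified, illustrative model of that cleanup (not kubelet's actual state store):

```go
package main

import "fmt"

// podUID -> containerName -> assigned cpuset (as a string, for brevity).
type assignments map[string]map[string]string

// removeStale drops every entry whose pod UID is no longer active,
// echoing the log's RemoveStaleState lines as it goes.
func (a assignments) removeStale(active map[string]bool) {
	for uid, containers := range a {
		if active[uid] {
			continue
		}
		for name := range containers {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", uid, name)
		}
		delete(a, uid) // deleting during range is safe in Go
	}
}

func main() {
	state := assignments{
		"1e223962-c0ce-4d1e-9db2-e0cd0dc70302": {"sg-core": "0-1", "proxy-httpd": "2"},
	}
	state.removeStale(map[string]bool{"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa": true})
}
```
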
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.554003 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.554151 4881 scope.go:117] "RemoveContainer" containerID="8410ffae70b3e089d18fedbde8fab2cf53abf626438218ed6bc170d633d2c921" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.556667 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.556902 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.561612 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.571514 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.607234 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-config-data\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.607429 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.607477 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmh96\" (UniqueName: \"kubernetes.io/projected/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-kube-api-access-mmh96\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.607541 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-logs\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.607574 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.607601 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-public-tls-certs\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.709601 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-sg-core-conf-yaml\") pod 
\"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.709655 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-scripts\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.709713 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-config-data\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.709811 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.709831 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-config-data\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.709911 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.709958 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmh96\" (UniqueName: \"kubernetes.io/projected/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-kube-api-access-mmh96\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.710011 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-run-httpd\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.710052 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-logs\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.710081 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.710098 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-log-httpd\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.710123 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-public-tls-certs\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.710143 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cwbb\" (UniqueName: \"kubernetes.io/projected/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-kube-api-access-7cwbb\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.710165 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.711590 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-logs\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.715949 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.716112 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.716608 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-config-data\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.725005 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-public-tls-certs\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.811822 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-config-data\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.811860 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.811979 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-run-httpd\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.812017 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-log-httpd\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.812036 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cwbb\" (UniqueName: \"kubernetes.io/projected/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-kube-api-access-7cwbb\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.812052 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.812088 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.812108 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-scripts\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.812739 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-log-httpd\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.812843 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-run-httpd\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.815379 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.815467 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.815628 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-config-data\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.815688 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:10 crc kubenswrapper[4881]: I1211 08:42:10.817432 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-scripts\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:11 crc kubenswrapper[4881]: I1211 08:42:11.035910 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e223962-c0ce-4d1e-9db2-e0cd0dc70302" path="/var/lib/kubelet/pods/1e223962-c0ce-4d1e-9db2-e0cd0dc70302/volumes" Dec 11 08:42:11 crc kubenswrapper[4881]: I1211 08:42:11.037760 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88610cc6-c8c8-47f2-8425-e5714d01f7e2" path="/var/lib/kubelet/pods/88610cc6-c8c8-47f2-8425-e5714d01f7e2/volumes" Dec 11 08:42:11 crc kubenswrapper[4881]: I1211 08:42:11.046983 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmh96\" (UniqueName: \"kubernetes.io/projected/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-kube-api-access-mmh96\") pod \"nova-api-0\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " pod="openstack/nova-api-0" Dec 11 08:42:11 crc kubenswrapper[4881]: I1211 08:42:11.076641 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cwbb\" (UniqueName: \"kubernetes.io/projected/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-kube-api-access-7cwbb\") pod \"ceilometer-0\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " pod="openstack/ceilometer-0" Dec 11 08:42:11 crc kubenswrapper[4881]: I1211 08:42:11.170937 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 08:42:11 crc kubenswrapper[4881]: I1211 08:42:11.180408 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:42:11 crc kubenswrapper[4881]: I1211 08:42:11.649213 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:42:11 crc kubenswrapper[4881]: I1211 08:42:11.862412 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 11 08:42:12 crc kubenswrapper[4881]: I1211 08:42:12.372002 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa","Type":"ContainerStarted","Data":"f7c1a6e95bf4c200b1261e4fdfce4995f14d68d08cdd2e7dfae724b3b6eac440"} Dec 11 08:42:12 crc kubenswrapper[4881]: I1211 08:42:12.373823 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c","Type":"ContainerStarted","Data":"3d42d59a00e022042efdaccfa39cbb5b5829d613ecad5fe0a65ce4fadb8c2b65"} Dec 11 08:42:13 crc kubenswrapper[4881]: E1211 08:42:13.180579 4881 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/6812a55cc022f8c579881128c2547e5de1a845e8dace691b36aceb24c2ce1617/diff" to get inode usage: stat /var/lib/containers/storage/overlay/6812a55cc022f8c579881128c2547e5de1a845e8dace691b36aceb24c2ce1617/diff: no such file or directory, extraDiskErr: could not stat "/var/log/pods/openshift-marketplace_redhat-operators-vk695_324526c9-d17c-4dfd-b2f7-b86e9577c36c/registry-server/0.log" to get inode usage: stat /var/log/pods/openshift-marketplace_redhat-operators-vk695_324526c9-d17c-4dfd-b2f7-b86e9577c36c/registry-server/0.log: no such file or directory Dec 11 08:42:13 crc kubenswrapper[4881]: I1211 08:42:13.320425 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:42:13 crc kubenswrapper[4881]: I1211 08:42:13.415757 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-5xbg9"] Dec 11 08:42:13 crc kubenswrapper[4881]: I1211 08:42:13.416312 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9" podUID="8a89d31f-1156-4509-85be-1ac98304de6c" containerName="dnsmasq-dns" containerID="cri-o://1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3" gracePeriod=10 Dec 11 08:42:13 crc kubenswrapper[4881]: I1211 08:42:13.425399 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c","Type":"ContainerStarted","Data":"aa2a280ed96678db5b04f886b58913f782e34a6e7df60ea6b4bf7128dc6a541e"} Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.079693 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.224040 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-ovsdbserver-nb\") pod \"8a89d31f-1156-4509-85be-1ac98304de6c\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.224160 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-config\") pod \"8a89d31f-1156-4509-85be-1ac98304de6c\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.225085 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rmhf\" (UniqueName: \"kubernetes.io/projected/8a89d31f-1156-4509-85be-1ac98304de6c-kube-api-access-8rmhf\") pod \"8a89d31f-1156-4509-85be-1ac98304de6c\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.225167 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-ovsdbserver-sb\") pod \"8a89d31f-1156-4509-85be-1ac98304de6c\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.225495 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-dns-svc\") pod \"8a89d31f-1156-4509-85be-1ac98304de6c\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.225608 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-dns-swift-storage-0\") pod \"8a89d31f-1156-4509-85be-1ac98304de6c\" (UID: \"8a89d31f-1156-4509-85be-1ac98304de6c\") " Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.255581 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a89d31f-1156-4509-85be-1ac98304de6c-kube-api-access-8rmhf" (OuterVolumeSpecName: "kube-api-access-8rmhf") pod "8a89d31f-1156-4509-85be-1ac98304de6c" (UID: "8a89d31f-1156-4509-85be-1ac98304de6c"). InnerVolumeSpecName "kube-api-access-8rmhf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.332764 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rmhf\" (UniqueName: \"kubernetes.io/projected/8a89d31f-1156-4509-85be-1ac98304de6c-kube-api-access-8rmhf\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.342034 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8a89d31f-1156-4509-85be-1ac98304de6c" (UID: "8a89d31f-1156-4509-85be-1ac98304de6c"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.345193 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-config" (OuterVolumeSpecName: "config") pod "8a89d31f-1156-4509-85be-1ac98304de6c" (UID: "8a89d31f-1156-4509-85be-1ac98304de6c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.369745 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8a89d31f-1156-4509-85be-1ac98304de6c" (UID: "8a89d31f-1156-4509-85be-1ac98304de6c"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.380641 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8a89d31f-1156-4509-85be-1ac98304de6c" (UID: "8a89d31f-1156-4509-85be-1ac98304de6c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.394012 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8a89d31f-1156-4509-85be-1ac98304de6c" (UID: "8a89d31f-1156-4509-85be-1ac98304de6c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.435992 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.436040 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.436053 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.436067 4881 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.436080 4881 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8a89d31f-1156-4509-85be-1ac98304de6c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.450514 4881 generic.go:334] "Generic (PLEG): container finished" podID="8a89d31f-1156-4509-85be-1ac98304de6c" containerID="1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3" exitCode=0 Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.450612 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.450600 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9" event={"ID":"8a89d31f-1156-4509-85be-1ac98304de6c","Type":"ContainerDied","Data":"1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3"} Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.450763 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-5xbg9" event={"ID":"8a89d31f-1156-4509-85be-1ac98304de6c","Type":"ContainerDied","Data":"424515f5587f34e6aebd1dde992be73912d373f6dfb46ea944f97d3199192c6a"} Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.450794 4881 scope.go:117] "RemoveContainer" containerID="1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.461228 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa","Type":"ContainerStarted","Data":"001dbd7763875d0d496eaf1cf80c51cc1b2a40cbbf021e3ac5e4911ef4236184"} Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.476911 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c","Type":"ContainerStarted","Data":"0fc4f0176e6edc37e8de2b9bcea2037788771d7b02c16417cc0b34e73bb9a3da"} Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.513503 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=4.513479224 podStartE2EDuration="4.513479224s" podCreationTimestamp="2025-12-11 08:42:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:42:14.496488917 +0000 UTC m=+1582.873857624" watchObservedRunningTime="2025-12-11 08:42:14.513479224 +0000 UTC m=+1582.890847931" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.538165 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-5xbg9"] Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.550277 4881 scope.go:117] "RemoveContainer" containerID="5dec1101ecfed5306fb6b0c67ef748dd6d81429ad00cdfaed4c5098ea4dc7592" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.551399 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-5xbg9"] Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.654351 4881 scope.go:117] "RemoveContainer" containerID="1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3" Dec 11 08:42:14 crc kubenswrapper[4881]: E1211 08:42:14.688815 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3\": container with ID starting with 1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3 not found: ID does not exist" containerID="1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.688861 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3"} err="failed to get container status \"1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3\": rpc error: code = NotFound desc = could not 
find container \"1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3\": container with ID starting with 1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3 not found: ID does not exist" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.688894 4881 scope.go:117] "RemoveContainer" containerID="5dec1101ecfed5306fb6b0c67ef748dd6d81429ad00cdfaed4c5098ea4dc7592" Dec 11 08:42:14 crc kubenswrapper[4881]: E1211 08:42:14.690142 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5dec1101ecfed5306fb6b0c67ef748dd6d81429ad00cdfaed4c5098ea4dc7592\": container with ID starting with 5dec1101ecfed5306fb6b0c67ef748dd6d81429ad00cdfaed4c5098ea4dc7592 not found: ID does not exist" containerID="5dec1101ecfed5306fb6b0c67ef748dd6d81429ad00cdfaed4c5098ea4dc7592" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.690194 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5dec1101ecfed5306fb6b0c67ef748dd6d81429ad00cdfaed4c5098ea4dc7592"} err="failed to get container status \"5dec1101ecfed5306fb6b0c67ef748dd6d81429ad00cdfaed4c5098ea4dc7592\": rpc error: code = NotFound desc = could not find container \"5dec1101ecfed5306fb6b0c67ef748dd6d81429ad00cdfaed4c5098ea4dc7592\": container with ID starting with 5dec1101ecfed5306fb6b0c67ef748dd6d81429ad00cdfaed4c5098ea4dc7592 not found: ID does not exist" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.983801 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:14 crc kubenswrapper[4881]: I1211 08:42:14.999204 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.028788 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a89d31f-1156-4509-85be-1ac98304de6c" path="/var/lib/kubelet/pods/8a89d31f-1156-4509-85be-1ac98304de6c/volumes" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.029694 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.041021 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.504749 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa","Type":"ContainerStarted","Data":"280c8d37b0a6337b557a41baaf36c2400eea6b55ff1dfde838e13454aea06c11"} Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.538753 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.546495 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.761509 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-s5tsr"] Dec 11 08:42:15 crc kubenswrapper[4881]: E1211 08:42:15.762227 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a89d31f-1156-4509-85be-1ac98304de6c" containerName="init" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.762250 4881 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="8a89d31f-1156-4509-85be-1ac98304de6c" containerName="init" Dec 11 08:42:15 crc kubenswrapper[4881]: E1211 08:42:15.762289 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a89d31f-1156-4509-85be-1ac98304de6c" containerName="dnsmasq-dns" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.762297 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a89d31f-1156-4509-85be-1ac98304de6c" containerName="dnsmasq-dns" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.762594 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a89d31f-1156-4509-85be-1ac98304de6c" containerName="dnsmasq-dns" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.763719 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-s5tsr" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.766083 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.766757 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.792208 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-s5tsr"] Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.879011 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-scripts\") pod \"nova-cell1-cell-mapping-s5tsr\" (UID: \"6b6f1a51-4774-4356-a5ef-5e901d75d889\") " pod="openstack/nova-cell1-cell-mapping-s5tsr" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.879520 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-config-data\") pod \"nova-cell1-cell-mapping-s5tsr\" (UID: \"6b6f1a51-4774-4356-a5ef-5e901d75d889\") " pod="openstack/nova-cell1-cell-mapping-s5tsr" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.879744 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-s5tsr\" (UID: \"6b6f1a51-4774-4356-a5ef-5e901d75d889\") " pod="openstack/nova-cell1-cell-mapping-s5tsr" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.879844 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zljff\" (UniqueName: \"kubernetes.io/projected/6b6f1a51-4774-4356-a5ef-5e901d75d889-kube-api-access-zljff\") pod \"nova-cell1-cell-mapping-s5tsr\" (UID: \"6b6f1a51-4774-4356-a5ef-5e901d75d889\") " pod="openstack/nova-cell1-cell-mapping-s5tsr" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.983256 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-config-data\") pod \"nova-cell1-cell-mapping-s5tsr\" (UID: \"6b6f1a51-4774-4356-a5ef-5e901d75d889\") " pod="openstack/nova-cell1-cell-mapping-s5tsr" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.983382 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-s5tsr\" (UID: \"6b6f1a51-4774-4356-a5ef-5e901d75d889\") " pod="openstack/nova-cell1-cell-mapping-s5tsr" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.983421 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zljff\" (UniqueName: \"kubernetes.io/projected/6b6f1a51-4774-4356-a5ef-5e901d75d889-kube-api-access-zljff\") pod \"nova-cell1-cell-mapping-s5tsr\" (UID: \"6b6f1a51-4774-4356-a5ef-5e901d75d889\") " pod="openstack/nova-cell1-cell-mapping-s5tsr" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.983580 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-scripts\") pod \"nova-cell1-cell-mapping-s5tsr\" (UID: \"6b6f1a51-4774-4356-a5ef-5e901d75d889\") " pod="openstack/nova-cell1-cell-mapping-s5tsr" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.993179 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-scripts\") pod \"nova-cell1-cell-mapping-s5tsr\" (UID: \"6b6f1a51-4774-4356-a5ef-5e901d75d889\") " pod="openstack/nova-cell1-cell-mapping-s5tsr" Dec 11 08:42:15 crc kubenswrapper[4881]: I1211 08:42:15.993206 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-s5tsr\" (UID: \"6b6f1a51-4774-4356-a5ef-5e901d75d889\") " pod="openstack/nova-cell1-cell-mapping-s5tsr" Dec 11 08:42:16 crc kubenswrapper[4881]: I1211 08:42:16.000538 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-config-data\") pod \"nova-cell1-cell-mapping-s5tsr\" (UID: \"6b6f1a51-4774-4356-a5ef-5e901d75d889\") " pod="openstack/nova-cell1-cell-mapping-s5tsr" Dec 11 08:42:16 crc kubenswrapper[4881]: I1211 08:42:16.007302 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zljff\" (UniqueName: \"kubernetes.io/projected/6b6f1a51-4774-4356-a5ef-5e901d75d889-kube-api-access-zljff\") pod \"nova-cell1-cell-mapping-s5tsr\" (UID: \"6b6f1a51-4774-4356-a5ef-5e901d75d889\") " pod="openstack/nova-cell1-cell-mapping-s5tsr" Dec 11 08:42:16 crc kubenswrapper[4881]: I1211 08:42:16.185162 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-s5tsr" Dec 11 08:42:16 crc kubenswrapper[4881]: I1211 08:42:16.527106 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa","Type":"ContainerStarted","Data":"f00a23608c51c731dc3ffaac100788cecea1036c8f366c3cb6d2a83ff48fe5c6"} Dec 11 08:42:16 crc kubenswrapper[4881]: I1211 08:42:16.725987 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-s5tsr"] Dec 11 08:42:17 crc kubenswrapper[4881]: I1211 08:42:17.539037 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-s5tsr" event={"ID":"6b6f1a51-4774-4356-a5ef-5e901d75d889","Type":"ContainerStarted","Data":"1f15ea09d95add8e3ec9a347353f72d9f4e1b86913da909c37a877b3ce297048"} Dec 11 08:42:17 crc kubenswrapper[4881]: I1211 08:42:17.540821 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-s5tsr" event={"ID":"6b6f1a51-4774-4356-a5ef-5e901d75d889","Type":"ContainerStarted","Data":"d52a8ae85b689f1dee4e35cf151fbacd397be6c0089006e476d20063dcc83b86"} Dec 11 08:42:17 crc kubenswrapper[4881]: I1211 08:42:17.564485 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-s5tsr" podStartSLOduration=2.564463095 podStartE2EDuration="2.564463095s" podCreationTimestamp="2025-12-11 08:42:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:42:17.558455336 +0000 UTC m=+1585.935824043" watchObservedRunningTime="2025-12-11 08:42:17.564463095 +0000 UTC m=+1585.941831792" Dec 11 08:42:18 crc kubenswrapper[4881]: I1211 08:42:18.559976 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa","Type":"ContainerStarted","Data":"c15179834beeb123ed7633c6556d2807e15027df6475b0b46ed81d83d81736f0"} Dec 11 08:42:18 crc kubenswrapper[4881]: I1211 08:42:18.560357 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 11 08:42:18 crc kubenswrapper[4881]: I1211 08:42:18.649279 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.411487066 podStartE2EDuration="8.649256925s" podCreationTimestamp="2025-12-11 08:42:10 +0000 UTC" firstStartedPulling="2025-12-11 08:42:11.644067368 +0000 UTC m=+1580.021436065" lastFinishedPulling="2025-12-11 08:42:17.881837227 +0000 UTC m=+1586.259205924" observedRunningTime="2025-12-11 08:42:18.623893927 +0000 UTC m=+1587.001262634" watchObservedRunningTime="2025-12-11 08:42:18.649256925 +0000 UTC m=+1587.026625622" Dec 11 08:42:21 crc kubenswrapper[4881]: I1211 08:42:21.172047 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 11 08:42:21 crc kubenswrapper[4881]: I1211 08:42:21.172702 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 11 08:42:21 crc kubenswrapper[4881]: W1211 08:42:21.799504 4881 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2376e63_11dc_4d35_be7c_eff1af8f8534.slice/crio-conmon-7b28f08e70b3b16c1b082a8725e98fb5b980250d5cb2fa5b93670a6fd2ba9b15.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch 
/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2376e63_11dc_4d35_be7c_eff1af8f8534.slice/crio-conmon-7b28f08e70b3b16c1b082a8725e98fb5b980250d5cb2fa5b93670a6fd2ba9b15.scope: no such file or directory Dec 11 08:42:21 crc kubenswrapper[4881]: W1211 08:42:21.799573 4881 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2376e63_11dc_4d35_be7c_eff1af8f8534.slice/crio-7b28f08e70b3b16c1b082a8725e98fb5b980250d5cb2fa5b93670a6fd2ba9b15.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2376e63_11dc_4d35_be7c_eff1af8f8534.slice/crio-7b28f08e70b3b16c1b082a8725e98fb5b980250d5cb2fa5b93670a6fd2ba9b15.scope: no such file or directory Dec 11 08:42:21 crc kubenswrapper[4881]: W1211 08:42:21.826659 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-72008dd43f0222dde14415321800759fff9d3e325cb59e9576b9cfc2182224cc.scope WatchSource:0}: Error finding container 72008dd43f0222dde14415321800759fff9d3e325cb59e9576b9cfc2182224cc: Status 404 returned error can't find the container with id 72008dd43f0222dde14415321800759fff9d3e325cb59e9576b9cfc2182224cc Dec 11 08:42:22 crc kubenswrapper[4881]: E1211 08:42:22.004210 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88610cc6_c8c8_47f2_8425_e5714d01f7e2.slice/crio-447f6eddf46f057e9c04d1f3c4088a6a0ab0ac83f446c22b24457f654a7e5877.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode688f7d9_d215_48aa_8092_e176d3437f09.slice/crio-conmon-e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod532f11a0_2d04_48b6_87a8_b27e99195ac9.slice/crio-6b27f9a88ba0e9df7605f0240965774967a307b37e86a1c9c2a9d43264bb5042\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod532f11a0_2d04_48b6_87a8_b27e99195ac9.slice/crio-conmon-eca5d6f7bfbb9f1cc81b72c1afde398cc730cee98865ca62bceac737883a8dd4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9bdc0d4_7342_4b61_8acc_c2ac4c62653b.slice/crio-4cef02557414b487e8e8b0f835864abd3654785c1a571d517144633bc977824c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ce4c4b4_5f9a_4e64_af32_6c3d805a01fc.slice/crio-f697e3148cabc05aacae526bee51af71f26a21fc511787e9d7e84e3da5b7998c\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a89d31f_1156_4509_85be_1ac98304de6c.slice/crio-1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a89d31f_1156_4509_85be_1ac98304de6c.slice/crio-424515f5587f34e6aebd1dde992be73912d373f6dfb46ea944f97d3199192c6a\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-8ba1420e2d2939d49603eda3c4c537666c793b4ee222c1bb4743410e6d7fcc39.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-8410ffae70b3e089d18fedbde8fab2cf53abf626438218ed6bc170d633d2c921.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-1257b08e4188c1026c60fa90aa8dfd41d01ae08ea1530a7ae64a6c10d85fc1d4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod532f11a0_2d04_48b6_87a8_b27e99195ac9.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88610cc6_c8c8_47f2_8425_e5714d01f7e2.slice/crio-5547df66d0502b39203818a66c5451b5ab55b55be882862677c10c5d499b1947.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-conmon-8ba1420e2d2939d49603eda3c4c537666c793b4ee222c1bb4743410e6d7fcc39.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode688f7d9_d215_48aa_8092_e176d3437f09.slice/crio-e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a89d31f_1156_4509_85be_1ac98304de6c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ce4c4b4_5f9a_4e64_af32_6c3d805a01fc.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-d89c12eca30a4bde2f8bc067026e755b3972417eccab9ffdce126ecbbba33e88.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode688f7d9_d215_48aa_8092_e176d3437f09.slice/crio-5212e12db9086955469cdaeb40997be0eaac6cab01484238de7fe94120e16dfa\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a89d31f_1156_4509_85be_1ac98304de6c.slice/crio-conmon-1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-conmon-d89c12eca30a4bde2f8bc067026e755b3972417eccab9ffdce126ecbbba33e88.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-conmon-8410ffae70b3e089d18fedbde8fab2cf53abf626438218ed6bc170d633d2c921.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-c90ce7cd8fcf126d46d59245f06190daa1830091da52fd5b1af057889d7e2442.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9bdc0d4_7342_4b61_8acc_c2ac4c62653b.slice/crio-conmon-4cef02557414b487e8e8b0f835864abd3654785c1a571d517144633bc977824c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-f5990963d4fcf23f21125d59372f5889d4b3fc5f3aba9c025a9b632bfb6c4b20\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88610cc6_c8c8_47f2_8425_e5714d01f7e2.slice/crio-conmon-447f6eddf46f057e9c04d1f3c4088a6a0ab0ac83f446c22b24457f654a7e5877.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod532f11a0_2d04_48b6_87a8_b27e99195ac9.slice/crio-eca5d6f7bfbb9f1cc81b72c1afde398cc730cee98865ca62bceac737883a8dd4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88610cc6_c8c8_47f2_8425_e5714d01f7e2.slice/crio-conmon-5547df66d0502b39203818a66c5451b5ab55b55be882862677c10c5d499b1947.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode688f7d9_d215_48aa_8092_e176d3437f09.slice\": RecentStats: unable to find data in memory cache]" Dec 11 08:42:22 crc kubenswrapper[4881]: E1211 08:42:22.005267 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9bdc0d4_7342_4b61_8acc_c2ac4c62653b.slice/crio-6d5651a7391d686acf9f7a1e064a27a444fa9404cbfcc8c73b35a82fac196ed0\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ce4c4b4_5f9a_4e64_af32_6c3d805a01fc.slice/crio-f697e3148cabc05aacae526bee51af71f26a21fc511787e9d7e84e3da5b7998c\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ce4c4b4_5f9a_4e64_af32_6c3d805a01fc.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88610cc6_c8c8_47f2_8425_e5714d01f7e2.slice/crio-447f6eddf46f057e9c04d1f3c4088a6a0ab0ac83f446c22b24457f654a7e5877.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod532f11a0_2d04_48b6_87a8_b27e99195ac9.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-8ba1420e2d2939d49603eda3c4c537666c793b4ee222c1bb4743410e6d7fcc39.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-conmon-d89c12eca30a4bde2f8bc067026e755b3972417eccab9ffdce126ecbbba33e88.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod532f11a0_2d04_48b6_87a8_b27e99195ac9.slice/crio-eca5d6f7bfbb9f1cc81b72c1afde398cc730cee98865ca62bceac737883a8dd4.scope\": RecentStats: unable to find data in 
memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-8410ffae70b3e089d18fedbde8fab2cf53abf626438218ed6bc170d633d2c921.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a89d31f_1156_4509_85be_1ac98304de6c.slice/crio-1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88610cc6_c8c8_47f2_8425_e5714d01f7e2.slice/crio-conmon-5547df66d0502b39203818a66c5451b5ab55b55be882862677c10c5d499b1947.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-c90ce7cd8fcf126d46d59245f06190daa1830091da52fd5b1af057889d7e2442.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88610cc6_c8c8_47f2_8425_e5714d01f7e2.slice/crio-conmon-447f6eddf46f057e9c04d1f3c4088a6a0ab0ac83f446c22b24457f654a7e5877.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode688f7d9_d215_48aa_8092_e176d3437f09.slice/crio-5212e12db9086955469cdaeb40997be0eaac6cab01484238de7fe94120e16dfa\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-1257b08e4188c1026c60fa90aa8dfd41d01ae08ea1530a7ae64a6c10d85fc1d4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88610cc6_c8c8_47f2_8425_e5714d01f7e2.slice/crio-5547df66d0502b39203818a66c5451b5ab55b55be882862677c10c5d499b1947.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a89d31f_1156_4509_85be_1ac98304de6c.slice/crio-424515f5587f34e6aebd1dde992be73912d373f6dfb46ea944f97d3199192c6a\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice/crio-d89c12eca30a4bde2f8bc067026e755b3972417eccab9ffdce126ecbbba33e88.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode688f7d9_d215_48aa_8092_e176d3437f09.slice/crio-conmon-e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode688f7d9_d215_48aa_8092_e176d3437f09.slice/crio-e1b52b19ca98e36e161fee41ff46127d736b904ad1e26525435cefec416d25ee.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod324526c9_d17c_4dfd_b2f7_b86e9577c36c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88610cc6_c8c8_47f2_8425_e5714d01f7e2.slice/crio-70ce343a0acdb45a979305c0074ec869eff0d47c50e1bc2c512c4c67777f1adb\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-be33c703178f993b71dd30f771cd0f7d0cad4c706bbbefda1240186a83a72a92.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-f5990963d4fcf23f21125d59372f5889d4b3fc5f3aba9c025a9b632bfb6c4b20\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod532f11a0_2d04_48b6_87a8_b27e99195ac9.slice/crio-6b27f9a88ba0e9df7605f0240965774967a307b37e86a1c9c2a9d43264bb5042\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-conmon-be33c703178f993b71dd30f771cd0f7d0cad4c706bbbefda1240186a83a72a92.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-conmon-8410ffae70b3e089d18fedbde8fab2cf53abf626438218ed6bc170d633d2c921.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88610cc6_c8c8_47f2_8425_e5714d01f7e2.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9bdc0d4_7342_4b61_8acc_c2ac4c62653b.slice/crio-conmon-4cef02557414b487e8e8b0f835864abd3654785c1a571d517144633bc977824c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a89d31f_1156_4509_85be_1ac98304de6c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod532f11a0_2d04_48b6_87a8_b27e99195ac9.slice/crio-conmon-eca5d6f7bfbb9f1cc81b72c1afde398cc730cee98865ca62bceac737883a8dd4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-conmon-8ba1420e2d2939d49603eda3c4c537666c793b4ee222c1bb4743410e6d7fcc39.scope\": RecentStats: unable to find data in memory cache]" Dec 11 08:42:22 crc kubenswrapper[4881]: E1211 08:42:22.005633 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-conmon-8ba1420e2d2939d49603eda3c4c537666c793b4ee222c1bb4743410e6d7fcc39.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a89d31f_1156_4509_85be_1ac98304de6c.slice/crio-conmon-1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a89d31f_1156_4509_85be_1ac98304de6c.slice/crio-1b181dbb63ad929241e5e7449c3df035202daaaa67bd99f06382e726021653d3.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88610cc6_c8c8_47f2_8425_e5714d01f7e2.slice/crio-conmon-447f6eddf46f057e9c04d1f3c4088a6a0ab0ac83f446c22b24457f654a7e5877.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88610cc6_c8c8_47f2_8425_e5714d01f7e2.slice/crio-447f6eddf46f057e9c04d1f3c4088a6a0ab0ac83f446c22b24457f654a7e5877.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-8ba1420e2d2939d49603eda3c4c537666c793b4ee222c1bb4743410e6d7fcc39.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-conmon-8410ffae70b3e089d18fedbde8fab2cf53abf626438218ed6bc170d633d2c921.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9bdc0d4_7342_4b61_8acc_c2ac4c62653b.slice/crio-conmon-4cef02557414b487e8e8b0f835864abd3654785c1a571d517144633bc977824c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a89d31f_1156_4509_85be_1ac98304de6c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-f5990963d4fcf23f21125d59372f5889d4b3fc5f3aba9c025a9b632bfb6c4b20\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88610cc6_c8c8_47f2_8425_e5714d01f7e2.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88610cc6_c8c8_47f2_8425_e5714d01f7e2.slice/crio-5547df66d0502b39203818a66c5451b5ab55b55be882862677c10c5d499b1947.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88610cc6_c8c8_47f2_8425_e5714d01f7e2.slice/crio-70ce343a0acdb45a979305c0074ec869eff0d47c50e1bc2c512c4c67777f1adb\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88610cc6_c8c8_47f2_8425_e5714d01f7e2.slice/crio-conmon-5547df66d0502b39203818a66c5451b5ab55b55be882862677c10c5d499b1947.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9bdc0d4_7342_4b61_8acc_c2ac4c62653b.slice/crio-4cef02557414b487e8e8b0f835864abd3654785c1a571d517144633bc977824c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-conmon-be33c703178f993b71dd30f771cd0f7d0cad4c706bbbefda1240186a83a72a92.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-conmon-72008dd43f0222dde14415321800759fff9d3e325cb59e9576b9cfc2182224cc.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-be33c703178f993b71dd30f771cd0f7d0cad4c706bbbefda1240186a83a72a92.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice/crio-8410ffae70b3e089d18fedbde8fab2cf53abf626438218ed6bc170d633d2c921.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e223962_c0ce_4d1e_9db2_e0cd0dc70302.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a89d31f_1156_4509_85be_1ac98304de6c.slice/crio-424515f5587f34e6aebd1dde992be73912d373f6dfb46ea944f97d3199192c6a\": RecentStats: unable to find data in memory cache]" Dec 11 08:42:22 crc kubenswrapper[4881]: I1211 08:42:22.185576 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.253:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 08:42:22 crc kubenswrapper[4881]: I1211 08:42:22.185641 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.253:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 08:42:22 crc kubenswrapper[4881]: I1211 08:42:22.663839 4881 generic.go:334] "Generic (PLEG): container finished" podID="6b6f1a51-4774-4356-a5ef-5e901d75d889" containerID="1f15ea09d95add8e3ec9a347353f72d9f4e1b86913da909c37a877b3ce297048" exitCode=0 Dec 11 08:42:22 crc kubenswrapper[4881]: I1211 08:42:22.663962 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-s5tsr" event={"ID":"6b6f1a51-4774-4356-a5ef-5e901d75d889","Type":"ContainerDied","Data":"1f15ea09d95add8e3ec9a347353f72d9f4e1b86913da909c37a877b3ce297048"} Dec 11 08:42:22 crc kubenswrapper[4881]: I1211 08:42:22.669203 4881 generic.go:334] "Generic (PLEG): container finished" podID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerID="4cef02557414b487e8e8b0f835864abd3654785c1a571d517144633bc977824c" exitCode=137 Dec 11 08:42:22 crc kubenswrapper[4881]: I1211 08:42:22.669250 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b","Type":"ContainerDied","Data":"4cef02557414b487e8e8b0f835864abd3654785c1a571d517144633bc977824c"} Dec 11 08:42:22 crc kubenswrapper[4881]: I1211 08:42:22.905132 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Dec 11 08:42:22 crc kubenswrapper[4881]: I1211 08:42:22.997220 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-config-data\") pod \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\" (UID: \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\") " Dec 11 08:42:22 crc kubenswrapper[4881]: I1211 08:42:22.997416 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-combined-ca-bundle\") pod \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\" (UID: \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\") " Dec 11 08:42:22 crc kubenswrapper[4881]: I1211 08:42:22.997658 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vvszs\" (UniqueName: \"kubernetes.io/projected/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-kube-api-access-vvszs\") pod \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\" (UID: \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\") " Dec 11 08:42:22 crc kubenswrapper[4881]: I1211 08:42:22.997937 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-scripts\") pod \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\" (UID: \"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b\") " Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.005715 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-scripts" (OuterVolumeSpecName: "scripts") pod "c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" (UID: "c9bdc0d4-7342-4b61-8acc-c2ac4c62653b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.043704 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-kube-api-access-vvszs" (OuterVolumeSpecName: "kube-api-access-vvszs") pod "c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" (UID: "c9bdc0d4-7342-4b61-8acc-c2ac4c62653b"). InnerVolumeSpecName "kube-api-access-vvszs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.131723 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vvszs\" (UniqueName: \"kubernetes.io/projected/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-kube-api-access-vvszs\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.139454 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.267690 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-config-data" (OuterVolumeSpecName: "config-data") pod "c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" (UID: "c9bdc0d4-7342-4b61-8acc-c2ac4c62653b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.278383 4881 scope.go:117] "RemoveContainer" containerID="ee58a97bf15ede8164bc20dc2102b2cd3854ce0929b2c2db6b2892101e2c841b" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.351358 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.369862 4881 scope.go:117] "RemoveContainer" containerID="300c9cc21d7c99e88f619059b20812e4d22519f3a2fa93be568aaa8a35c666db" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.377351 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" (UID: "c9bdc0d4-7342-4b61-8acc-c2ac4c62653b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.402559 4881 scope.go:117] "RemoveContainer" containerID="d2e7f857235b2bcb1ad66c87925fed80c9ecd182c5a79c243a4af95cd5333196" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.452903 4881 scope.go:117] "RemoveContainer" containerID="afba34d0541f3a617bbd22315ff9717b831464d8a4ba0f8b8017969828cf95d7" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.453505 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.715028 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.715913 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"c9bdc0d4-7342-4b61-8acc-c2ac4c62653b","Type":"ContainerDied","Data":"6d5651a7391d686acf9f7a1e064a27a444fa9404cbfcc8c73b35a82fac196ed0"} Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.715994 4881 scope.go:117] "RemoveContainer" containerID="4cef02557414b487e8e8b0f835864abd3654785c1a571d517144633bc977824c" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.820779 4881 scope.go:117] "RemoveContainer" containerID="bc909f046c8674c0430d07c127bd01edb69853e0e426736024aa90df3e3ef25f" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.821129 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.862544 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.877541 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Dec 11 08:42:23 crc kubenswrapper[4881]: E1211 08:42:23.878135 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerName="aodh-evaluator" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.878157 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerName="aodh-evaluator" Dec 11 08:42:23 crc kubenswrapper[4881]: E1211 08:42:23.878179 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerName="aodh-api" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.878188 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerName="aodh-api" Dec 11 08:42:23 crc kubenswrapper[4881]: E1211 08:42:23.878227 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerName="aodh-notifier" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.878237 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerName="aodh-notifier" Dec 11 08:42:23 crc kubenswrapper[4881]: E1211 08:42:23.878261 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerName="aodh-listener" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.878269 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerName="aodh-listener" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.878603 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerName="aodh-notifier" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.878628 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerName="aodh-api" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.878660 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerName="aodh-listener" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.878680 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" containerName="aodh-evaluator" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.883081 4881 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.887869 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.887937 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.888353 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.888615 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-gqtm2" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.888917 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.893245 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.935361 4881 scope.go:117] "RemoveContainer" containerID="db3fbd048954f392125c26aaf6ca4615c6c6873297d0c73c18e6d3259a2ea1ed" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.985645 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-internal-tls-certs\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.986023 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-scripts\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.986502 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcr7x\" (UniqueName: \"kubernetes.io/projected/338db346-be5c-4382-9404-29d345bba595-kube-api-access-fcr7x\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.987269 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-config-data\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.987620 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-public-tls-certs\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:23 crc kubenswrapper[4881]: I1211 08:42:23.987725 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-combined-ca-bundle\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.040701 4881 scope.go:117] "RemoveContainer" 
containerID="30bcc67180507330b5527285e37e71256de00d00111147702b2ccdb179c0d95e" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.092832 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-scripts\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.094245 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcr7x\" (UniqueName: \"kubernetes.io/projected/338db346-be5c-4382-9404-29d345bba595-kube-api-access-fcr7x\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.094374 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-config-data\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.094524 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-public-tls-certs\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.094807 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-combined-ca-bundle\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.094981 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-internal-tls-certs\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.102366 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-internal-tls-certs\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.104363 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-public-tls-certs\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.107853 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-scripts\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.110080 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-combined-ca-bundle\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.111996 
4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-config-data\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.136182 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcr7x\" (UniqueName: \"kubernetes.io/projected/338db346-be5c-4382-9404-29d345bba595-kube-api-access-fcr7x\") pod \"aodh-0\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " pod="openstack/aodh-0" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.253552 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.448411 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-s5tsr" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.526104 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-config-data\") pod \"6b6f1a51-4774-4356-a5ef-5e901d75d889\" (UID: \"6b6f1a51-4774-4356-a5ef-5e901d75d889\") " Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.526622 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-scripts\") pod \"6b6f1a51-4774-4356-a5ef-5e901d75d889\" (UID: \"6b6f1a51-4774-4356-a5ef-5e901d75d889\") " Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.527269 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zljff\" (UniqueName: \"kubernetes.io/projected/6b6f1a51-4774-4356-a5ef-5e901d75d889-kube-api-access-zljff\") pod \"6b6f1a51-4774-4356-a5ef-5e901d75d889\" (UID: \"6b6f1a51-4774-4356-a5ef-5e901d75d889\") " Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.527621 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-combined-ca-bundle\") pod \"6b6f1a51-4774-4356-a5ef-5e901d75d889\" (UID: \"6b6f1a51-4774-4356-a5ef-5e901d75d889\") " Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.538750 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-scripts" (OuterVolumeSpecName: "scripts") pod "6b6f1a51-4774-4356-a5ef-5e901d75d889" (UID: "6b6f1a51-4774-4356-a5ef-5e901d75d889"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.538762 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b6f1a51-4774-4356-a5ef-5e901d75d889-kube-api-access-zljff" (OuterVolumeSpecName: "kube-api-access-zljff") pod "6b6f1a51-4774-4356-a5ef-5e901d75d889" (UID: "6b6f1a51-4774-4356-a5ef-5e901d75d889"). InnerVolumeSpecName "kube-api-access-zljff". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.577558 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6b6f1a51-4774-4356-a5ef-5e901d75d889" (UID: "6b6f1a51-4774-4356-a5ef-5e901d75d889"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.588158 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-config-data" (OuterVolumeSpecName: "config-data") pod "6b6f1a51-4774-4356-a5ef-5e901d75d889" (UID: "6b6f1a51-4774-4356-a5ef-5e901d75d889"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.631112 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zljff\" (UniqueName: \"kubernetes.io/projected/6b6f1a51-4774-4356-a5ef-5e901d75d889-kube-api-access-zljff\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.631154 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.631165 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.631173 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b6f1a51-4774-4356-a5ef-5e901d75d889-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.801880 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-s5tsr" event={"ID":"6b6f1a51-4774-4356-a5ef-5e901d75d889","Type":"ContainerDied","Data":"d52a8ae85b689f1dee4e35cf151fbacd397be6c0089006e476d20063dcc83b86"} Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.801938 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d52a8ae85b689f1dee4e35cf151fbacd397be6c0089006e476d20063dcc83b86" Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.802057 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-s5tsr" Dec 11 08:42:24 crc kubenswrapper[4881]: W1211 08:42:24.894768 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod338db346_be5c_4382_9404_29d345bba595.slice/crio-32fdb8a9454acf728711e177e91072eb325ebac82c3a589640ac0a10111ce07e WatchSource:0}: Error finding container 32fdb8a9454acf728711e177e91072eb325ebac82c3a589640ac0a10111ce07e: Status 404 returned error can't find the container with id 32fdb8a9454acf728711e177e91072eb325ebac82c3a589640ac0a10111ce07e Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.907593 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.954410 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.954767 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="41b377bb-0ea7-45df-be96-b7f03c9ff994" containerName="nova-scheduler-scheduler" containerID="cri-o://e6dabacf3bbbc2a7fb8d5aaae341f17129a2707c1d2c810e62a19bc09c0885ef" gracePeriod=30 Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.990297 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.991040 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" containerName="nova-api-log" containerID="cri-o://aa2a280ed96678db5b04f886b58913f782e34a6e7df60ea6b4bf7128dc6a541e" gracePeriod=30 Dec 11 08:42:24 crc kubenswrapper[4881]: I1211 08:42:24.991264 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" containerName="nova-api-api" containerID="cri-o://0fc4f0176e6edc37e8de2b9bcea2037788771d7b02c16417cc0b34e73bb9a3da" gracePeriod=30 Dec 11 08:42:25 crc kubenswrapper[4881]: E1211 08:42:25.001769 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e6dabacf3bbbc2a7fb8d5aaae341f17129a2707c1d2c810e62a19bc09c0885ef" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:42:25 crc kubenswrapper[4881]: E1211 08:42:25.010747 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e6dabacf3bbbc2a7fb8d5aaae341f17129a2707c1d2c810e62a19bc09c0885ef" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:42:25 crc kubenswrapper[4881]: E1211 08:42:25.018139 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e6dabacf3bbbc2a7fb8d5aaae341f17129a2707c1d2c810e62a19bc09c0885ef" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 08:42:25 crc kubenswrapper[4881]: E1211 08:42:25.018312 4881 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" 
pod="openstack/nova-scheduler-0" podUID="41b377bb-0ea7-45df-be96-b7f03c9ff994" containerName="nova-scheduler-scheduler" Dec 11 08:42:25 crc kubenswrapper[4881]: I1211 08:42:25.035154 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9bdc0d4-7342-4b61-8acc-c2ac4c62653b" path="/var/lib/kubelet/pods/c9bdc0d4-7342-4b61-8acc-c2ac4c62653b/volumes" Dec 11 08:42:25 crc kubenswrapper[4881]: I1211 08:42:25.036655 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 08:42:25 crc kubenswrapper[4881]: I1211 08:42:25.037024 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" containerName="nova-metadata-log" containerID="cri-o://2184c385a971e223e031a3584efedad7092a43a0579a4ad57e97c461525c2d00" gracePeriod=30 Dec 11 08:42:25 crc kubenswrapper[4881]: I1211 08:42:25.037205 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" containerName="nova-metadata-metadata" containerID="cri-o://67eb97c9052c95582305c04ef8237c3a7a5fcbe153b6943ea5671bf72a85fa9b" gracePeriod=30 Dec 11 08:42:25 crc kubenswrapper[4881]: I1211 08:42:25.819086 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"338db346-be5c-4382-9404-29d345bba595","Type":"ContainerStarted","Data":"32fdb8a9454acf728711e177e91072eb325ebac82c3a589640ac0a10111ce07e"} Dec 11 08:42:25 crc kubenswrapper[4881]: I1211 08:42:25.826803 4881 generic.go:334] "Generic (PLEG): container finished" podID="ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" containerID="aa2a280ed96678db5b04f886b58913f782e34a6e7df60ea6b4bf7128dc6a541e" exitCode=143 Dec 11 08:42:25 crc kubenswrapper[4881]: I1211 08:42:25.826890 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c","Type":"ContainerDied","Data":"aa2a280ed96678db5b04f886b58913f782e34a6e7df60ea6b4bf7128dc6a541e"} Dec 11 08:42:25 crc kubenswrapper[4881]: I1211 08:42:25.850129 4881 generic.go:334] "Generic (PLEG): container finished" podID="7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" containerID="2184c385a971e223e031a3584efedad7092a43a0579a4ad57e97c461525c2d00" exitCode=143 Dec 11 08:42:25 crc kubenswrapper[4881]: I1211 08:42:25.850173 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844","Type":"ContainerDied","Data":"2184c385a971e223e031a3584efedad7092a43a0579a4ad57e97c461525c2d00"} Dec 11 08:42:26 crc kubenswrapper[4881]: I1211 08:42:26.863108 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"338db346-be5c-4382-9404-29d345bba595","Type":"ContainerStarted","Data":"51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471"} Dec 11 08:42:27 crc kubenswrapper[4881]: I1211 08:42:27.874848 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"338db346-be5c-4382-9404-29d345bba595","Type":"ContainerStarted","Data":"92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605"} Dec 11 08:42:28 crc kubenswrapper[4881]: I1211 08:42:28.898039 4881 generic.go:334] "Generic (PLEG): container finished" podID="ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" containerID="0fc4f0176e6edc37e8de2b9bcea2037788771d7b02c16417cc0b34e73bb9a3da" exitCode=0 Dec 11 08:42:28 crc kubenswrapper[4881]: I1211 08:42:28.898129 4881 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c","Type":"ContainerDied","Data":"0fc4f0176e6edc37e8de2b9bcea2037788771d7b02c16417cc0b34e73bb9a3da"} Dec 11 08:42:28 crc kubenswrapper[4881]: I1211 08:42:28.901959 4881 generic.go:334] "Generic (PLEG): container finished" podID="7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" containerID="67eb97c9052c95582305c04ef8237c3a7a5fcbe153b6943ea5671bf72a85fa9b" exitCode=0 Dec 11 08:42:28 crc kubenswrapper[4881]: I1211 08:42:28.902055 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844","Type":"ContainerDied","Data":"67eb97c9052c95582305c04ef8237c3a7a5fcbe153b6943ea5671bf72a85fa9b"} Dec 11 08:42:28 crc kubenswrapper[4881]: I1211 08:42:28.912984 4881 generic.go:334] "Generic (PLEG): container finished" podID="41b377bb-0ea7-45df-be96-b7f03c9ff994" containerID="e6dabacf3bbbc2a7fb8d5aaae341f17129a2707c1d2c810e62a19bc09c0885ef" exitCode=0 Dec 11 08:42:28 crc kubenswrapper[4881]: I1211 08:42:28.913183 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"41b377bb-0ea7-45df-be96-b7f03c9ff994","Type":"ContainerDied","Data":"e6dabacf3bbbc2a7fb8d5aaae341f17129a2707c1d2c810e62a19bc09c0885ef"} Dec 11 08:42:28 crc kubenswrapper[4881]: I1211 08:42:28.922285 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"338db346-be5c-4382-9404-29d345bba595","Type":"ContainerStarted","Data":"a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9"} Dec 11 08:42:28 crc kubenswrapper[4881]: I1211 08:42:28.930046 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 11 08:42:28 crc kubenswrapper[4881]: I1211 08:42:28.959151 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-logs\") pod \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " Dec 11 08:42:28 crc kubenswrapper[4881]: I1211 08:42:28.959308 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-config-data\") pod \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " Dec 11 08:42:28 crc kubenswrapper[4881]: I1211 08:42:28.959721 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2gzs\" (UniqueName: \"kubernetes.io/projected/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-kube-api-access-c2gzs\") pod \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " Dec 11 08:42:28 crc kubenswrapper[4881]: I1211 08:42:28.959805 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-nova-metadata-tls-certs\") pod \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\" (UID: \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " Dec 11 08:42:28 crc kubenswrapper[4881]: I1211 08:42:28.959874 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-combined-ca-bundle\") pod \"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\" (UID: 
\"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844\") " Dec 11 08:42:28 crc kubenswrapper[4881]: I1211 08:42:28.961028 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-logs" (OuterVolumeSpecName: "logs") pod "7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" (UID: "7a5a5663-d97f-47f5-b4a8-4c18ae3bd844"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:42:28 crc kubenswrapper[4881]: I1211 08:42:28.963099 4881 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-logs\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:28 crc kubenswrapper[4881]: I1211 08:42:28.972753 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-kube-api-access-c2gzs" (OuterVolumeSpecName: "kube-api-access-c2gzs") pod "7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" (UID: "7a5a5663-d97f-47f5-b4a8-4c18ae3bd844"). InnerVolumeSpecName "kube-api-access-c2gzs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.013081 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-config-data" (OuterVolumeSpecName: "config-data") pod "7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" (UID: "7a5a5663-d97f-47f5-b4a8-4c18ae3bd844"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.032663 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" (UID: "7a5a5663-d97f-47f5-b4a8-4c18ae3bd844"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.055684 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" (UID: "7a5a5663-d97f-47f5-b4a8-4c18ae3bd844"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.065462 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2gzs\" (UniqueName: \"kubernetes.io/projected/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-kube-api-access-c2gzs\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.065497 4881 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.065507 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.065518 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.417418 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.417784 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.547049 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.598945 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-config-data\") pod \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.598999 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-internal-tls-certs\") pod \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.599108 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-logs\") pod \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.599222 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmh96\" (UniqueName: \"kubernetes.io/projected/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-kube-api-access-mmh96\") pod \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.599488 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-combined-ca-bundle\") pod \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.599534 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-public-tls-certs\") pod \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\" (UID: \"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c\") " Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.606046 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-logs" (OuterVolumeSpecName: "logs") pod "ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" (UID: "ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.616551 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-kube-api-access-mmh96" (OuterVolumeSpecName: "kube-api-access-mmh96") pod "ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" (UID: "ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c"). InnerVolumeSpecName "kube-api-access-mmh96". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.638832 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-config-data" (OuterVolumeSpecName: "config-data") pod "ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" (UID: "ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.661687 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" (UID: "ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.681707 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" (UID: "ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.687244 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.702453 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41b377bb-0ea7-45df-be96-b7f03c9ff994-config-data\") pod \"41b377bb-0ea7-45df-be96-b7f03c9ff994\" (UID: \"41b377bb-0ea7-45df-be96-b7f03c9ff994\") " Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.702582 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41b377bb-0ea7-45df-be96-b7f03c9ff994-combined-ca-bundle\") pod \"41b377bb-0ea7-45df-be96-b7f03c9ff994\" (UID: \"41b377bb-0ea7-45df-be96-b7f03c9ff994\") " Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.702751 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8w4vr\" (UniqueName: \"kubernetes.io/projected/41b377bb-0ea7-45df-be96-b7f03c9ff994-kube-api-access-8w4vr\") pod \"41b377bb-0ea7-45df-be96-b7f03c9ff994\" (UID: \"41b377bb-0ea7-45df-be96-b7f03c9ff994\") " Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.703849 4881 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-public-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.703875 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.703890 4881 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-logs\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.703903 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmh96\" (UniqueName: \"kubernetes.io/projected/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-kube-api-access-mmh96\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.703920 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:29 crc 
kubenswrapper[4881]: I1211 08:42:29.704147 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" (UID: "ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.708676 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41b377bb-0ea7-45df-be96-b7f03c9ff994-kube-api-access-8w4vr" (OuterVolumeSpecName: "kube-api-access-8w4vr") pod "41b377bb-0ea7-45df-be96-b7f03c9ff994" (UID: "41b377bb-0ea7-45df-be96-b7f03c9ff994"). InnerVolumeSpecName "kube-api-access-8w4vr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.783583 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41b377bb-0ea7-45df-be96-b7f03c9ff994-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "41b377bb-0ea7-45df-be96-b7f03c9ff994" (UID: "41b377bb-0ea7-45df-be96-b7f03c9ff994"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.799368 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41b377bb-0ea7-45df-be96-b7f03c9ff994-config-data" (OuterVolumeSpecName: "config-data") pod "41b377bb-0ea7-45df-be96-b7f03c9ff994" (UID: "41b377bb-0ea7-45df-be96-b7f03c9ff994"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.808866 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8w4vr\" (UniqueName: \"kubernetes.io/projected/41b377bb-0ea7-45df-be96-b7f03c9ff994-kube-api-access-8w4vr\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.808912 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41b377bb-0ea7-45df-be96-b7f03c9ff994-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.808925 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41b377bb-0ea7-45df-be96-b7f03c9ff994-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.808937 4881 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.940042 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.940038 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c","Type":"ContainerDied","Data":"3d42d59a00e022042efdaccfa39cbb5b5829d613ecad5fe0a65ce4fadb8c2b65"} Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.940888 4881 scope.go:117] "RemoveContainer" containerID="0fc4f0176e6edc37e8de2b9bcea2037788771d7b02c16417cc0b34e73bb9a3da" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.944977 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7a5a5663-d97f-47f5-b4a8-4c18ae3bd844","Type":"ContainerDied","Data":"04037f0c77061cbc307677273fbd3020e41d9574bf0f94523ca43f536a33decb"} Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.945096 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.954968 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"41b377bb-0ea7-45df-be96-b7f03c9ff994","Type":"ContainerDied","Data":"415a1f43316e7762427f0ceaf8edc187468dfefd1369de758d956ac0c00f19ff"} Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.955075 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.962465 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"338db346-be5c-4382-9404-29d345bba595","Type":"ContainerStarted","Data":"9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8"} Dec 11 08:42:29 crc kubenswrapper[4881]: I1211 08:42:29.970605 4881 scope.go:117] "RemoveContainer" containerID="aa2a280ed96678db5b04f886b58913f782e34a6e7df60ea6b4bf7128dc6a541e" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.028797 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.619339573 podStartE2EDuration="7.028776949s" podCreationTimestamp="2025-12-11 08:42:23 +0000 UTC" firstStartedPulling="2025-12-11 08:42:24.909595949 +0000 UTC m=+1593.286964646" lastFinishedPulling="2025-12-11 08:42:29.319033325 +0000 UTC m=+1597.696402022" observedRunningTime="2025-12-11 08:42:29.988830736 +0000 UTC m=+1598.366199453" watchObservedRunningTime="2025-12-11 08:42:30.028776949 +0000 UTC m=+1598.406145646" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.089638 4881 scope.go:117] "RemoveContainer" containerID="67eb97c9052c95582305c04ef8237c3a7a5fcbe153b6943ea5671bf72a85fa9b" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.090444 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.128178 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.156738 4881 scope.go:117] "RemoveContainer" containerID="2184c385a971e223e031a3584efedad7092a43a0579a4ad57e97c461525c2d00" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.173434 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.186553 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 11 08:42:30 crc kubenswrapper[4881]: E1211 
08:42:30.187129 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" containerName="nova-api-api" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.187142 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" containerName="nova-api-api" Dec 11 08:42:30 crc kubenswrapper[4881]: E1211 08:42:30.187159 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" containerName="nova-api-log" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.187165 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" containerName="nova-api-log" Dec 11 08:42:30 crc kubenswrapper[4881]: E1211 08:42:30.187174 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b6f1a51-4774-4356-a5ef-5e901d75d889" containerName="nova-manage" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.187180 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b6f1a51-4774-4356-a5ef-5e901d75d889" containerName="nova-manage" Dec 11 08:42:30 crc kubenswrapper[4881]: E1211 08:42:30.187199 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" containerName="nova-metadata-log" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.187205 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" containerName="nova-metadata-log" Dec 11 08:42:30 crc kubenswrapper[4881]: E1211 08:42:30.187221 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" containerName="nova-metadata-metadata" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.187226 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" containerName="nova-metadata-metadata" Dec 11 08:42:30 crc kubenswrapper[4881]: E1211 08:42:30.187246 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b377bb-0ea7-45df-be96-b7f03c9ff994" containerName="nova-scheduler-scheduler" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.187252 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b377bb-0ea7-45df-be96-b7f03c9ff994" containerName="nova-scheduler-scheduler" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.187471 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" containerName="nova-metadata-metadata" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.187494 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="41b377bb-0ea7-45df-be96-b7f03c9ff994" containerName="nova-scheduler-scheduler" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.187510 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" containerName="nova-api-log" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.187523 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" containerName="nova-api-api" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.187534 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b6f1a51-4774-4356-a5ef-5e901d75d889" containerName="nova-manage" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.187544 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" 
containerName="nova-metadata-log" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.188808 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.194032 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.194480 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.195753 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.226120 4881 scope.go:117] "RemoveContainer" containerID="e6dabacf3bbbc2a7fb8d5aaae341f17129a2707c1d2c810e62a19bc09c0885ef" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.228600 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a461b235-8929-4c28-a4bc-fcc40fe9ede9-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.228729 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a461b235-8929-4c28-a4bc-fcc40fe9ede9-config-data\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.228838 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a461b235-8929-4c28-a4bc-fcc40fe9ede9-public-tls-certs\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.228898 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ttcw\" (UniqueName: \"kubernetes.io/projected/a461b235-8929-4c28-a4bc-fcc40fe9ede9-kube-api-access-4ttcw\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.228995 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a461b235-8929-4c28-a4bc-fcc40fe9ede9-logs\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.229054 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a461b235-8929-4c28-a4bc-fcc40fe9ede9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.231517 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.291849 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.343625 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-4ttcw\" (UniqueName: \"kubernetes.io/projected/a461b235-8929-4c28-a4bc-fcc40fe9ede9-kube-api-access-4ttcw\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.345909 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a461b235-8929-4c28-a4bc-fcc40fe9ede9-logs\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.346079 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a461b235-8929-4c28-a4bc-fcc40fe9ede9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.346253 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a461b235-8929-4c28-a4bc-fcc40fe9ede9-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.346459 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a461b235-8929-4c28-a4bc-fcc40fe9ede9-config-data\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.346745 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a461b235-8929-4c28-a4bc-fcc40fe9ede9-public-tls-certs\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.348577 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a461b235-8929-4c28-a4bc-fcc40fe9ede9-logs\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.354006 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a461b235-8929-4c28-a4bc-fcc40fe9ede9-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.368810 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a461b235-8929-4c28-a4bc-fcc40fe9ede9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.369025 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a461b235-8929-4c28-a4bc-fcc40fe9ede9-public-tls-certs\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.380746 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ttcw\" (UniqueName: 
\"kubernetes.io/projected/a461b235-8929-4c28-a4bc-fcc40fe9ede9-kube-api-access-4ttcw\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.389465 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.394580 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.402538 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.402662 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a461b235-8929-4c28-a4bc-fcc40fe9ede9-config-data\") pod \"nova-api-0\" (UID: \"a461b235-8929-4c28-a4bc-fcc40fe9ede9\") " pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.402984 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.408051 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.422694 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.449049 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af5edf02-ece3-42cd-b5da-a84f734d2505-logs\") pod \"nova-metadata-0\" (UID: \"af5edf02-ece3-42cd-b5da-a84f734d2505\") " pod="openstack/nova-metadata-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.449168 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/af5edf02-ece3-42cd-b5da-a84f734d2505-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"af5edf02-ece3-42cd-b5da-a84f734d2505\") " pod="openstack/nova-metadata-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.449260 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wprqv\" (UniqueName: \"kubernetes.io/projected/af5edf02-ece3-42cd-b5da-a84f734d2505-kube-api-access-wprqv\") pod \"nova-metadata-0\" (UID: \"af5edf02-ece3-42cd-b5da-a84f734d2505\") " pod="openstack/nova-metadata-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.453255 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.457696 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af5edf02-ece3-42cd-b5da-a84f734d2505-config-data\") pod \"nova-metadata-0\" (UID: \"af5edf02-ece3-42cd-b5da-a84f734d2505\") " pod="openstack/nova-metadata-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.457856 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af5edf02-ece3-42cd-b5da-a84f734d2505-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"af5edf02-ece3-42cd-b5da-a84f734d2505\") " pod="openstack/nova-metadata-0" 
Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.478820 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.482985 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.486789 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.491587 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.546014 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.560035 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b12d1174-33a2-4075-8cc6-bd591d290563-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b12d1174-33a2-4075-8cc6-bd591d290563\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.560120 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af5edf02-ece3-42cd-b5da-a84f734d2505-logs\") pod \"nova-metadata-0\" (UID: \"af5edf02-ece3-42cd-b5da-a84f734d2505\") " pod="openstack/nova-metadata-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.560163 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcr4k\" (UniqueName: \"kubernetes.io/projected/b12d1174-33a2-4075-8cc6-bd591d290563-kube-api-access-rcr4k\") pod \"nova-scheduler-0\" (UID: \"b12d1174-33a2-4075-8cc6-bd591d290563\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.560215 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/af5edf02-ece3-42cd-b5da-a84f734d2505-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"af5edf02-ece3-42cd-b5da-a84f734d2505\") " pod="openstack/nova-metadata-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.560302 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wprqv\" (UniqueName: \"kubernetes.io/projected/af5edf02-ece3-42cd-b5da-a84f734d2505-kube-api-access-wprqv\") pod \"nova-metadata-0\" (UID: \"af5edf02-ece3-42cd-b5da-a84f734d2505\") " pod="openstack/nova-metadata-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.560409 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af5edf02-ece3-42cd-b5da-a84f734d2505-config-data\") pod \"nova-metadata-0\" (UID: \"af5edf02-ece3-42cd-b5da-a84f734d2505\") " pod="openstack/nova-metadata-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.560478 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b12d1174-33a2-4075-8cc6-bd591d290563-config-data\") pod \"nova-scheduler-0\" (UID: \"b12d1174-33a2-4075-8cc6-bd591d290563\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.560510 4881 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af5edf02-ece3-42cd-b5da-a84f734d2505-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"af5edf02-ece3-42cd-b5da-a84f734d2505\") " pod="openstack/nova-metadata-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.561842 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af5edf02-ece3-42cd-b5da-a84f734d2505-logs\") pod \"nova-metadata-0\" (UID: \"af5edf02-ece3-42cd-b5da-a84f734d2505\") " pod="openstack/nova-metadata-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.564665 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/af5edf02-ece3-42cd-b5da-a84f734d2505-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"af5edf02-ece3-42cd-b5da-a84f734d2505\") " pod="openstack/nova-metadata-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.564812 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af5edf02-ece3-42cd-b5da-a84f734d2505-config-data\") pod \"nova-metadata-0\" (UID: \"af5edf02-ece3-42cd-b5da-a84f734d2505\") " pod="openstack/nova-metadata-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.566599 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af5edf02-ece3-42cd-b5da-a84f734d2505-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"af5edf02-ece3-42cd-b5da-a84f734d2505\") " pod="openstack/nova-metadata-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.577844 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wprqv\" (UniqueName: \"kubernetes.io/projected/af5edf02-ece3-42cd-b5da-a84f734d2505-kube-api-access-wprqv\") pod \"nova-metadata-0\" (UID: \"af5edf02-ece3-42cd-b5da-a84f734d2505\") " pod="openstack/nova-metadata-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.669451 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b12d1174-33a2-4075-8cc6-bd591d290563-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b12d1174-33a2-4075-8cc6-bd591d290563\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.669780 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcr4k\" (UniqueName: \"kubernetes.io/projected/b12d1174-33a2-4075-8cc6-bd591d290563-kube-api-access-rcr4k\") pod \"nova-scheduler-0\" (UID: \"b12d1174-33a2-4075-8cc6-bd591d290563\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.670360 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b12d1174-33a2-4075-8cc6-bd591d290563-config-data\") pod \"nova-scheduler-0\" (UID: \"b12d1174-33a2-4075-8cc6-bd591d290563\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.676675 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b12d1174-33a2-4075-8cc6-bd591d290563-config-data\") pod \"nova-scheduler-0\" (UID: \"b12d1174-33a2-4075-8cc6-bd591d290563\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.676834 4881 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b12d1174-33a2-4075-8cc6-bd591d290563-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b12d1174-33a2-4075-8cc6-bd591d290563\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.688465 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcr4k\" (UniqueName: \"kubernetes.io/projected/b12d1174-33a2-4075-8cc6-bd591d290563-kube-api-access-rcr4k\") pod \"nova-scheduler-0\" (UID: \"b12d1174-33a2-4075-8cc6-bd591d290563\") " pod="openstack/nova-scheduler-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.798079 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 11 08:42:30 crc kubenswrapper[4881]: I1211 08:42:30.811617 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 11 08:42:31 crc kubenswrapper[4881]: I1211 08:42:31.024894 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41b377bb-0ea7-45df-be96-b7f03c9ff994" path="/var/lib/kubelet/pods/41b377bb-0ea7-45df-be96-b7f03c9ff994/volumes" Dec 11 08:42:31 crc kubenswrapper[4881]: I1211 08:42:31.025941 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" path="/var/lib/kubelet/pods/7a5a5663-d97f-47f5-b4a8-4c18ae3bd844/volumes" Dec 11 08:42:31 crc kubenswrapper[4881]: I1211 08:42:31.026724 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c" path="/var/lib/kubelet/pods/ae7bac1b-08d2-4ab8-b7b1-d5f0152f844c/volumes" Dec 11 08:42:31 crc kubenswrapper[4881]: I1211 08:42:31.067229 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 11 08:42:31 crc kubenswrapper[4881]: I1211 08:42:31.364210 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 08:42:31 crc kubenswrapper[4881]: I1211 08:42:31.461495 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 08:42:32 crc kubenswrapper[4881]: I1211 08:42:32.019020 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b12d1174-33a2-4075-8cc6-bd591d290563","Type":"ContainerStarted","Data":"675e7085dd1404c25cf037edd0a34d4dae69e70bcd3210c53e4444885347edae"} Dec 11 08:42:32 crc kubenswrapper[4881]: I1211 08:42:32.019422 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b12d1174-33a2-4075-8cc6-bd591d290563","Type":"ContainerStarted","Data":"d8ec2ae6f8448e78237b468f3a2eee9306cdab2e16ad1f865e246cf9c25b9cba"} Dec 11 08:42:32 crc kubenswrapper[4881]: I1211 08:42:32.024605 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"af5edf02-ece3-42cd-b5da-a84f734d2505","Type":"ContainerStarted","Data":"164611e9169d4cbbc2e93b65807148e9cadb60a8fb7efe8e696298f7745af56f"} Dec 11 08:42:32 crc kubenswrapper[4881]: I1211 08:42:32.024707 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"af5edf02-ece3-42cd-b5da-a84f734d2505","Type":"ContainerStarted","Data":"f890ae957dc622a1789a8fd33f95d23d43c415adf9d534ed5b0d3346cb6b1a18"} Dec 11 08:42:32 crc kubenswrapper[4881]: I1211 08:42:32.024724 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-metadata-0" event={"ID":"af5edf02-ece3-42cd-b5da-a84f734d2505","Type":"ContainerStarted","Data":"79227b3d8ba2bf9e644f91f3aac93b515ca299cc9ec90db6b680ef542306f76a"} Dec 11 08:42:32 crc kubenswrapper[4881]: I1211 08:42:32.028070 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a461b235-8929-4c28-a4bc-fcc40fe9ede9","Type":"ContainerStarted","Data":"e19980442e6aab3839d21521ed08544322544f78abb573ce6f1b8a7d022ebbfc"} Dec 11 08:42:32 crc kubenswrapper[4881]: I1211 08:42:32.028144 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a461b235-8929-4c28-a4bc-fcc40fe9ede9","Type":"ContainerStarted","Data":"06defc81945ec2066fdbc8bb153d04f6549a494e289aa477e316379cff9a420b"} Dec 11 08:42:32 crc kubenswrapper[4881]: I1211 08:42:32.028163 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a461b235-8929-4c28-a4bc-fcc40fe9ede9","Type":"ContainerStarted","Data":"2c5f98e4a9a2bf26a1c000c64c75d045c4f57c0808c31eb28288c02a7e63f7da"} Dec 11 08:42:32 crc kubenswrapper[4881]: I1211 08:42:32.044283 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.044259494 podStartE2EDuration="2.044259494s" podCreationTimestamp="2025-12-11 08:42:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:42:32.038636556 +0000 UTC m=+1600.416005263" watchObservedRunningTime="2025-12-11 08:42:32.044259494 +0000 UTC m=+1600.421628191" Dec 11 08:42:32 crc kubenswrapper[4881]: I1211 08:42:32.127800 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.127780509 podStartE2EDuration="2.127780509s" podCreationTimestamp="2025-12-11 08:42:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:42:32.092757992 +0000 UTC m=+1600.470126689" watchObservedRunningTime="2025-12-11 08:42:32.127780509 +0000 UTC m=+1600.505149206" Dec 11 08:42:32 crc kubenswrapper[4881]: I1211 08:42:32.129273 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.129266154 podStartE2EDuration="2.129266154s" podCreationTimestamp="2025-12-11 08:42:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:42:32.111693188 +0000 UTC m=+1600.489061895" watchObservedRunningTime="2025-12-11 08:42:32.129266154 +0000 UTC m=+1600.506634851" Dec 11 08:42:33 crc kubenswrapper[4881]: I1211 08:42:33.705977 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.245:8775/\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 11 08:42:33 crc kubenswrapper[4881]: I1211 08:42:33.706084 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="7a5a5663-d97f-47f5-b4a8-4c18ae3bd844" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.245:8775/\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" 
Dec 11 08:42:35 crc kubenswrapper[4881]: I1211 08:42:35.799199 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 11 08:42:35 crc kubenswrapper[4881]: I1211 08:42:35.799490 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 11 08:42:35 crc kubenswrapper[4881]: I1211 08:42:35.812060 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 11 08:42:40 crc kubenswrapper[4881]: I1211 08:42:40.546573 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 11 08:42:40 crc kubenswrapper[4881]: I1211 08:42:40.547203 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 11 08:42:40 crc kubenswrapper[4881]: I1211 08:42:40.798530 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 11 08:42:40 crc kubenswrapper[4881]: I1211 08:42:40.799003 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 11 08:42:40 crc kubenswrapper[4881]: I1211 08:42:40.812564 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 11 08:42:40 crc kubenswrapper[4881]: I1211 08:42:40.845209 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 11 08:42:41 crc kubenswrapper[4881]: I1211 08:42:41.204933 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 11 08:42:41 crc kubenswrapper[4881]: I1211 08:42:41.567107 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a461b235-8929-4c28-a4bc-fcc40fe9ede9" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.1:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 08:42:41 crc kubenswrapper[4881]: I1211 08:42:41.567177 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a461b235-8929-4c28-a4bc-fcc40fe9ede9" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.1:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 08:42:41 crc kubenswrapper[4881]: I1211 08:42:41.582997 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 11 08:42:41 crc kubenswrapper[4881]: I1211 08:42:41.840644 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="af5edf02-ece3-42cd-b5da-a84f734d2505" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.2:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 08:42:41 crc kubenswrapper[4881]: I1211 08:42:41.840693 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="af5edf02-ece3-42cd-b5da-a84f734d2505" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.2:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 11 08:42:50 crc kubenswrapper[4881]: I1211 08:42:50.558790 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 11 08:42:50 crc kubenswrapper[4881]: I1211 08:42:50.559576 4881 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 11 08:42:50 crc kubenswrapper[4881]: I1211 08:42:50.561974 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 11 08:42:50 crc kubenswrapper[4881]: I1211 08:42:50.564832 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 11 08:42:50 crc kubenswrapper[4881]: I1211 08:42:50.809922 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 11 08:42:50 crc kubenswrapper[4881]: I1211 08:42:50.815328 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 11 08:42:50 crc kubenswrapper[4881]: I1211 08:42:50.815493 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 11 08:42:51 crc kubenswrapper[4881]: I1211 08:42:51.299176 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 11 08:42:51 crc kubenswrapper[4881]: I1211 08:42:51.390515 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 11 08:42:51 crc kubenswrapper[4881]: I1211 08:42:51.396742 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 11 08:42:59 crc kubenswrapper[4881]: I1211 08:42:59.396636 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:42:59 crc kubenswrapper[4881]: I1211 08:42:59.397215 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:43:01 crc kubenswrapper[4881]: I1211 08:43:01.881411 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-jvkz7"] Dec 11 08:43:01 crc kubenswrapper[4881]: I1211 08:43:01.892646 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-jvkz7"] Dec 11 08:43:01 crc kubenswrapper[4881]: I1211 08:43:01.999278 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-mx8mp"] Dec 11 08:43:02 crc kubenswrapper[4881]: I1211 08:43:02.001314 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-mx8mp" Dec 11 08:43:02 crc kubenswrapper[4881]: I1211 08:43:02.022950 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-mx8mp"] Dec 11 08:43:02 crc kubenswrapper[4881]: I1211 08:43:02.124460 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6950faa-9468-4d23-9834-73fb64506367-config-data\") pod \"heat-db-sync-mx8mp\" (UID: \"f6950faa-9468-4d23-9834-73fb64506367\") " pod="openstack/heat-db-sync-mx8mp" Dec 11 08:43:02 crc kubenswrapper[4881]: I1211 08:43:02.124951 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6950faa-9468-4d23-9834-73fb64506367-combined-ca-bundle\") pod \"heat-db-sync-mx8mp\" (UID: \"f6950faa-9468-4d23-9834-73fb64506367\") " pod="openstack/heat-db-sync-mx8mp" Dec 11 08:43:02 crc kubenswrapper[4881]: I1211 08:43:02.125123 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7h4g\" (UniqueName: \"kubernetes.io/projected/f6950faa-9468-4d23-9834-73fb64506367-kube-api-access-n7h4g\") pod \"heat-db-sync-mx8mp\" (UID: \"f6950faa-9468-4d23-9834-73fb64506367\") " pod="openstack/heat-db-sync-mx8mp" Dec 11 08:43:02 crc kubenswrapper[4881]: I1211 08:43:02.227115 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6950faa-9468-4d23-9834-73fb64506367-config-data\") pod \"heat-db-sync-mx8mp\" (UID: \"f6950faa-9468-4d23-9834-73fb64506367\") " pod="openstack/heat-db-sync-mx8mp" Dec 11 08:43:02 crc kubenswrapper[4881]: I1211 08:43:02.227310 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6950faa-9468-4d23-9834-73fb64506367-combined-ca-bundle\") pod \"heat-db-sync-mx8mp\" (UID: \"f6950faa-9468-4d23-9834-73fb64506367\") " pod="openstack/heat-db-sync-mx8mp" Dec 11 08:43:02 crc kubenswrapper[4881]: I1211 08:43:02.227409 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7h4g\" (UniqueName: \"kubernetes.io/projected/f6950faa-9468-4d23-9834-73fb64506367-kube-api-access-n7h4g\") pod \"heat-db-sync-mx8mp\" (UID: \"f6950faa-9468-4d23-9834-73fb64506367\") " pod="openstack/heat-db-sync-mx8mp" Dec 11 08:43:02 crc kubenswrapper[4881]: I1211 08:43:02.234961 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6950faa-9468-4d23-9834-73fb64506367-combined-ca-bundle\") pod \"heat-db-sync-mx8mp\" (UID: \"f6950faa-9468-4d23-9834-73fb64506367\") " pod="openstack/heat-db-sync-mx8mp" Dec 11 08:43:02 crc kubenswrapper[4881]: I1211 08:43:02.236131 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6950faa-9468-4d23-9834-73fb64506367-config-data\") pod \"heat-db-sync-mx8mp\" (UID: \"f6950faa-9468-4d23-9834-73fb64506367\") " pod="openstack/heat-db-sync-mx8mp" Dec 11 08:43:02 crc kubenswrapper[4881]: I1211 08:43:02.247850 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7h4g\" (UniqueName: \"kubernetes.io/projected/f6950faa-9468-4d23-9834-73fb64506367-kube-api-access-n7h4g\") pod \"heat-db-sync-mx8mp\" (UID: 
\"f6950faa-9468-4d23-9834-73fb64506367\") " pod="openstack/heat-db-sync-mx8mp" Dec 11 08:43:02 crc kubenswrapper[4881]: I1211 08:43:02.319907 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-mx8mp" Dec 11 08:43:02 crc kubenswrapper[4881]: I1211 08:43:02.903684 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-mx8mp"] Dec 11 08:43:03 crc kubenswrapper[4881]: I1211 08:43:03.022535 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2dde239c-3502-4b29-8f5d-1893f53819bd" path="/var/lib/kubelet/pods/2dde239c-3502-4b29-8f5d-1893f53819bd/volumes" Dec 11 08:43:03 crc kubenswrapper[4881]: I1211 08:43:03.451677 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-mx8mp" event={"ID":"f6950faa-9468-4d23-9834-73fb64506367","Type":"ContainerStarted","Data":"b1e7a68be8157b59216e078baa842164bb8a25ee7e7e1e876cbef5c690fdbe84"} Dec 11 08:43:04 crc kubenswrapper[4881]: I1211 08:43:04.619008 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:43:04 crc kubenswrapper[4881]: I1211 08:43:04.619713 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerName="ceilometer-central-agent" containerID="cri-o://001dbd7763875d0d496eaf1cf80c51cc1b2a40cbbf021e3ac5e4911ef4236184" gracePeriod=30 Dec 11 08:43:04 crc kubenswrapper[4881]: I1211 08:43:04.619783 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerName="ceilometer-notification-agent" containerID="cri-o://280c8d37b0a6337b557a41baaf36c2400eea6b55ff1dfde838e13454aea06c11" gracePeriod=30 Dec 11 08:43:04 crc kubenswrapper[4881]: I1211 08:43:04.619773 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerName="proxy-httpd" containerID="cri-o://c15179834beeb123ed7633c6556d2807e15027df6475b0b46ed81d83d81736f0" gracePeriod=30 Dec 11 08:43:04 crc kubenswrapper[4881]: I1211 08:43:04.619796 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerName="sg-core" containerID="cri-o://f00a23608c51c731dc3ffaac100788cecea1036c8f366c3cb6d2a83ff48fe5c6" gracePeriod=30 Dec 11 08:43:04 crc kubenswrapper[4881]: I1211 08:43:04.936518 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 08:43:05 crc kubenswrapper[4881]: I1211 08:43:05.490520 4881 generic.go:334] "Generic (PLEG): container finished" podID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerID="c15179834beeb123ed7633c6556d2807e15027df6475b0b46ed81d83d81736f0" exitCode=0 Dec 11 08:43:05 crc kubenswrapper[4881]: I1211 08:43:05.490826 4881 generic.go:334] "Generic (PLEG): container finished" podID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerID="f00a23608c51c731dc3ffaac100788cecea1036c8f366c3cb6d2a83ff48fe5c6" exitCode=2 Dec 11 08:43:05 crc kubenswrapper[4881]: I1211 08:43:05.490919 4881 generic.go:334] "Generic (PLEG): container finished" podID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerID="001dbd7763875d0d496eaf1cf80c51cc1b2a40cbbf021e3ac5e4911ef4236184" exitCode=0 Dec 11 08:43:05 crc kubenswrapper[4881]: I1211 08:43:05.490713 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa","Type":"ContainerDied","Data":"c15179834beeb123ed7633c6556d2807e15027df6475b0b46ed81d83d81736f0"} Dec 11 08:43:05 crc kubenswrapper[4881]: I1211 08:43:05.491084 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa","Type":"ContainerDied","Data":"f00a23608c51c731dc3ffaac100788cecea1036c8f366c3cb6d2a83ff48fe5c6"} Dec 11 08:43:05 crc kubenswrapper[4881]: I1211 08:43:05.491172 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa","Type":"ContainerDied","Data":"001dbd7763875d0d496eaf1cf80c51cc1b2a40cbbf021e3ac5e4911ef4236184"} Dec 11 08:43:05 crc kubenswrapper[4881]: I1211 08:43:05.961936 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 08:43:09 crc kubenswrapper[4881]: I1211 08:43:09.555827 4881 generic.go:334] "Generic (PLEG): container finished" podID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerID="280c8d37b0a6337b557a41baaf36c2400eea6b55ff1dfde838e13454aea06c11" exitCode=0 Dec 11 08:43:09 crc kubenswrapper[4881]: I1211 08:43:09.556115 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa","Type":"ContainerDied","Data":"280c8d37b0a6337b557a41baaf36c2400eea6b55ff1dfde838e13454aea06c11"} Dec 11 08:43:09 crc kubenswrapper[4881]: I1211 08:43:09.789170 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:43:09 crc kubenswrapper[4881]: I1211 08:43:09.956355 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-combined-ca-bundle\") pod \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " Dec 11 08:43:09 crc kubenswrapper[4881]: I1211 08:43:09.956434 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-ceilometer-tls-certs\") pod \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " Dec 11 08:43:09 crc kubenswrapper[4881]: I1211 08:43:09.956608 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cwbb\" (UniqueName: \"kubernetes.io/projected/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-kube-api-access-7cwbb\") pod \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " Dec 11 08:43:09 crc kubenswrapper[4881]: I1211 08:43:09.956663 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-run-httpd\") pod \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " Dec 11 08:43:09 crc kubenswrapper[4881]: I1211 08:43:09.956852 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-scripts\") pod \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " Dec 11 08:43:09 crc kubenswrapper[4881]: I1211 08:43:09.956953 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-log-httpd\") pod \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " Dec 11 08:43:09 crc kubenswrapper[4881]: I1211 08:43:09.956984 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-config-data\") pod \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " Dec 11 08:43:09 crc kubenswrapper[4881]: I1211 08:43:09.957034 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-sg-core-conf-yaml\") pod \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\" (UID: \"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa\") " Dec 11 08:43:09 crc kubenswrapper[4881]: I1211 08:43:09.959026 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" (UID: "1cb637c3-35ca-4a93-bfb6-a7e67fec14fa"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:43:09 crc kubenswrapper[4881]: I1211 08:43:09.960879 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" (UID: "1cb637c3-35ca-4a93-bfb6-a7e67fec14fa"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:43:09 crc kubenswrapper[4881]: I1211 08:43:09.968562 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-kube-api-access-7cwbb" (OuterVolumeSpecName: "kube-api-access-7cwbb") pod "1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" (UID: "1cb637c3-35ca-4a93-bfb6-a7e67fec14fa"). InnerVolumeSpecName "kube-api-access-7cwbb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:43:09 crc kubenswrapper[4881]: I1211 08:43:09.989945 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-scripts" (OuterVolumeSpecName: "scripts") pod "1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" (UID: "1cb637c3-35ca-4a93-bfb6-a7e67fec14fa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.012252 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" (UID: "1cb637c3-35ca-4a93-bfb6-a7e67fec14fa"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.062218 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.062258 4881 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.062270 4881 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.062282 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cwbb\" (UniqueName: \"kubernetes.io/projected/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-kube-api-access-7cwbb\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.062292 4881 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.130676 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" (UID: "1cb637c3-35ca-4a93-bfb6-a7e67fec14fa"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.172022 4881 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.206566 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-config-data" (OuterVolumeSpecName: "config-data") pod "1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" (UID: "1cb637c3-35ca-4a93-bfb6-a7e67fec14fa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.220528 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" (UID: "1cb637c3-35ca-4a93-bfb6-a7e67fec14fa"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.275156 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.275208 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.609852 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cb637c3-35ca-4a93-bfb6-a7e67fec14fa","Type":"ContainerDied","Data":"f7c1a6e95bf4c200b1261e4fdfce4995f14d68d08cdd2e7dfae724b3b6eac440"} Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.610396 4881 scope.go:117] "RemoveContainer" containerID="c15179834beeb123ed7633c6556d2807e15027df6475b0b46ed81d83d81736f0" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.610568 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.678050 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.697486 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.786511 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="ed841687-cd89-4419-8726-85e086a5cc21" containerName="rabbitmq" containerID="cri-o://b76ca59b759174d4b3b68dec243d1e27ea0c579c0ee348a6d617483e4567b5ce" gracePeriod=604795 Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.790405 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:43:10 crc kubenswrapper[4881]: E1211 08:43:10.791973 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerName="sg-core" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.792010 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerName="sg-core" Dec 11 08:43:10 crc kubenswrapper[4881]: E1211 08:43:10.792070 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerName="ceilometer-central-agent" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.792082 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerName="ceilometer-central-agent" Dec 11 08:43:10 crc kubenswrapper[4881]: E1211 08:43:10.792113 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerName="ceilometer-notification-agent" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.792122 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerName="ceilometer-notification-agent" Dec 11 08:43:10 crc kubenswrapper[4881]: E1211 08:43:10.792150 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerName="proxy-httpd" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.792159 4881 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerName="proxy-httpd" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.793130 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerName="sg-core" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.793182 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerName="ceilometer-notification-agent" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.793204 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerName="ceilometer-central-agent" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.793231 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" containerName="proxy-httpd" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.799184 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.821821 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.822092 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.822319 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.917366 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.974441 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b51fa237-35ec-47d6-b61d-c3e50dc8450f-run-httpd\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.974532 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b51fa237-35ec-47d6-b61d-c3e50dc8450f-scripts\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.974658 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b51fa237-35ec-47d6-b61d-c3e50dc8450f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.974686 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b51fa237-35ec-47d6-b61d-c3e50dc8450f-config-data\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.974704 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b51fa237-35ec-47d6-b61d-c3e50dc8450f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") 
" pod="openstack/ceilometer-0" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.974756 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b51fa237-35ec-47d6-b61d-c3e50dc8450f-log-httpd\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.974808 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b51fa237-35ec-47d6-b61d-c3e50dc8450f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:10 crc kubenswrapper[4881]: I1211 08:43:10.974923 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzt2r\" (UniqueName: \"kubernetes.io/projected/b51fa237-35ec-47d6-b61d-c3e50dc8450f-kube-api-access-nzt2r\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.021298 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1cb637c3-35ca-4a93-bfb6-a7e67fec14fa" path="/var/lib/kubelet/pods/1cb637c3-35ca-4a93-bfb6-a7e67fec14fa/volumes" Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.077187 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b51fa237-35ec-47d6-b61d-c3e50dc8450f-log-httpd\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.077275 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b51fa237-35ec-47d6-b61d-c3e50dc8450f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.077387 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzt2r\" (UniqueName: \"kubernetes.io/projected/b51fa237-35ec-47d6-b61d-c3e50dc8450f-kube-api-access-nzt2r\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.077423 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b51fa237-35ec-47d6-b61d-c3e50dc8450f-run-httpd\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.077465 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b51fa237-35ec-47d6-b61d-c3e50dc8450f-scripts\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.077554 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b51fa237-35ec-47d6-b61d-c3e50dc8450f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 
08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.077570 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b51fa237-35ec-47d6-b61d-c3e50dc8450f-config-data\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.077588 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b51fa237-35ec-47d6-b61d-c3e50dc8450f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.078002 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b51fa237-35ec-47d6-b61d-c3e50dc8450f-log-httpd\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.078421 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b51fa237-35ec-47d6-b61d-c3e50dc8450f-run-httpd\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.081570 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b51fa237-35ec-47d6-b61d-c3e50dc8450f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.082096 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b51fa237-35ec-47d6-b61d-c3e50dc8450f-scripts\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.083111 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b51fa237-35ec-47d6-b61d-c3e50dc8450f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.097140 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b51fa237-35ec-47d6-b61d-c3e50dc8450f-config-data\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.097760 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b51fa237-35ec-47d6-b61d-c3e50dc8450f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.102122 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzt2r\" (UniqueName: \"kubernetes.io/projected/b51fa237-35ec-47d6-b61d-c3e50dc8450f-kube-api-access-nzt2r\") pod \"ceilometer-0\" (UID: \"b51fa237-35ec-47d6-b61d-c3e50dc8450f\") " pod="openstack/ceilometer-0" Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.181237 4881 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="83d70e0f-d672-49c8-89d6-c1aa99c572a0" containerName="rabbitmq" containerID="cri-o://9c1e8aa9aea1cf14c00df462fc461900deea0c524c22d4f6fe990e20562dd413" gracePeriod=604795 Dec 11 08:43:11 crc kubenswrapper[4881]: I1211 08:43:11.198508 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 08:43:12 crc kubenswrapper[4881]: I1211 08:43:12.641988 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="ed841687-cd89-4419-8726-85e086a5cc21" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused" Dec 11 08:43:12 crc kubenswrapper[4881]: I1211 08:43:12.937463 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="83d70e0f-d672-49c8-89d6-c1aa99c572a0" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused" Dec 11 08:43:17 crc kubenswrapper[4881]: I1211 08:43:17.974995 4881 scope.go:117] "RemoveContainer" containerID="f00a23608c51c731dc3ffaac100788cecea1036c8f366c3cb6d2a83ff48fe5c6" Dec 11 08:43:18 crc kubenswrapper[4881]: I1211 08:43:18.005467 4881 scope.go:117] "RemoveContainer" containerID="280c8d37b0a6337b557a41baaf36c2400eea6b55ff1dfde838e13454aea06c11" Dec 11 08:43:19 crc kubenswrapper[4881]: I1211 08:43:19.750714 4881 generic.go:334] "Generic (PLEG): container finished" podID="83d70e0f-d672-49c8-89d6-c1aa99c572a0" containerID="9c1e8aa9aea1cf14c00df462fc461900deea0c524c22d4f6fe990e20562dd413" exitCode=0 Dec 11 08:43:19 crc kubenswrapper[4881]: I1211 08:43:19.751268 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"83d70e0f-d672-49c8-89d6-c1aa99c572a0","Type":"ContainerDied","Data":"9c1e8aa9aea1cf14c00df462fc461900deea0c524c22d4f6fe990e20562dd413"} Dec 11 08:43:20 crc kubenswrapper[4881]: I1211 08:43:20.024867 4881 generic.go:334] "Generic (PLEG): container finished" podID="ed841687-cd89-4419-8726-85e086a5cc21" containerID="b76ca59b759174d4b3b68dec243d1e27ea0c579c0ee348a6d617483e4567b5ce" exitCode=0 Dec 11 08:43:20 crc kubenswrapper[4881]: I1211 08:43:20.024935 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ed841687-cd89-4419-8726-85e086a5cc21","Type":"ContainerDied","Data":"b76ca59b759174d4b3b68dec243d1e27ea0c579c0ee348a6d617483e4567b5ce"} Dec 11 08:43:21 crc kubenswrapper[4881]: I1211 08:43:21.037993 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 08:43:23 crc kubenswrapper[4881]: I1211 08:43:23.997862 4881 scope.go:117] "RemoveContainer" containerID="001dbd7763875d0d496eaf1cf80c51cc1b2a40cbbf021e3ac5e4911ef4236184" Dec 11 08:43:24 crc kubenswrapper[4881]: W1211 08:43:24.015368 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb51fa237_35ec_47d6_b61d_c3e50dc8450f.slice/crio-7be1f5c7291e0dedee332cf9bd04cb2a044d8e1bed36e1ff7578f92fe6893a52 WatchSource:0}: Error finding container 7be1f5c7291e0dedee332cf9bd04cb2a044d8e1bed36e1ff7578f92fe6893a52: Status 404 returned error can't find the container with id 7be1f5c7291e0dedee332cf9bd04cb2a044d8e1bed36e1ff7578f92fe6893a52 Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.135878 4881 scope.go:117] "RemoveContainer" 
containerID="6f5ccd333037555a031831817d11ecc7d4a25ca7cf05d3819deda856ae8cdce8" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.181916 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"83d70e0f-d672-49c8-89d6-c1aa99c572a0","Type":"ContainerDied","Data":"3217862f1edf01c9a7059bdf56d484104266d33ecf8edab58de2c1065831171f"} Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.182139 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3217862f1edf01c9a7059bdf56d484104266d33ecf8edab58de2c1065831171f" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.187178 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.191681 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.208967 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ed841687-cd89-4419-8726-85e086a5cc21","Type":"ContainerDied","Data":"ab949ebb5f98ec0bd36bca41b542132d17a18fbabaa3b31b787c099504b8a75e"} Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.234531 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b51fa237-35ec-47d6-b61d-c3e50dc8450f","Type":"ContainerStarted","Data":"7be1f5c7291e0dedee332cf9bd04cb2a044d8e1bed36e1ff7578f92fe6893a52"} Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.398509 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-plugins\") pod \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.398798 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-erlang-cookie\") pod \"ed841687-cd89-4419-8726-85e086a5cc21\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.398823 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ed841687-cd89-4419-8726-85e086a5cc21-pod-info\") pod \"ed841687-cd89-4419-8726-85e086a5cc21\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.398878 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8jt69\" (UniqueName: \"kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-kube-api-access-8jt69\") pod \"ed841687-cd89-4419-8726-85e086a5cc21\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.398895 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-config-data\") pod \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.398931 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-server-conf\") pod \"ed841687-cd89-4419-8726-85e086a5cc21\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.398945 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/83d70e0f-d672-49c8-89d6-c1aa99c572a0-pod-info\") pod \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.398980 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-server-conf\") pod \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.399007 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-confd\") pod \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.399081 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-plugins\") pod \"ed841687-cd89-4419-8726-85e086a5cc21\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.399104 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.399149 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfpct\" (UniqueName: \"kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-kube-api-access-cfpct\") pod \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.399168 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-plugins-conf\") pod \"ed841687-cd89-4419-8726-85e086a5cc21\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.399196 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-confd\") pod \"ed841687-cd89-4419-8726-85e086a5cc21\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.399217 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-config-data\") pod \"ed841687-cd89-4419-8726-85e086a5cc21\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.399262 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/ed841687-cd89-4419-8726-85e086a5cc21-erlang-cookie-secret\") pod \"ed841687-cd89-4419-8726-85e086a5cc21\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.399305 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-plugins-conf\") pod \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.399322 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/83d70e0f-d672-49c8-89d6-c1aa99c572a0-erlang-cookie-secret\") pod \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.399351 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-tls\") pod \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.399374 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-tls\") pod \"ed841687-cd89-4419-8726-85e086a5cc21\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.399417 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-erlang-cookie\") pod \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\" (UID: \"83d70e0f-d672-49c8-89d6-c1aa99c572a0\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.399497 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ed841687-cd89-4419-8726-85e086a5cc21\" (UID: \"ed841687-cd89-4419-8726-85e086a5cc21\") " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.409796 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "ed841687-cd89-4419-8726-85e086a5cc21" (UID: "ed841687-cd89-4419-8726-85e086a5cc21"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.410822 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "83d70e0f-d672-49c8-89d6-c1aa99c572a0" (UID: "83d70e0f-d672-49c8-89d6-c1aa99c572a0"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.417263 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "83d70e0f-d672-49c8-89d6-c1aa99c572a0" (UID: "83d70e0f-d672-49c8-89d6-c1aa99c572a0"). 
InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.417779 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "ed841687-cd89-4419-8726-85e086a5cc21" (UID: "ed841687-cd89-4419-8726-85e086a5cc21"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.418346 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "83d70e0f-d672-49c8-89d6-c1aa99c572a0" (UID: "83d70e0f-d672-49c8-89d6-c1aa99c572a0"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.419402 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "persistence") pod "ed841687-cd89-4419-8726-85e086a5cc21" (UID: "ed841687-cd89-4419-8726-85e086a5cc21"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.420201 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "ed841687-cd89-4419-8726-85e086a5cc21" (UID: "ed841687-cd89-4419-8726-85e086a5cc21"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.427787 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed841687-cd89-4419-8726-85e086a5cc21-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "ed841687-cd89-4419-8726-85e086a5cc21" (UID: "ed841687-cd89-4419-8726-85e086a5cc21"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.427898 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83d70e0f-d672-49c8-89d6-c1aa99c572a0-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "83d70e0f-d672-49c8-89d6-c1aa99c572a0" (UID: "83d70e0f-d672-49c8-89d6-c1aa99c572a0"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.448261 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "83d70e0f-d672-49c8-89d6-c1aa99c572a0" (UID: "83d70e0f-d672-49c8-89d6-c1aa99c572a0"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.449610 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/83d70e0f-d672-49c8-89d6-c1aa99c572a0-pod-info" (OuterVolumeSpecName: "pod-info") pod "83d70e0f-d672-49c8-89d6-c1aa99c572a0" (UID: "83d70e0f-d672-49c8-89d6-c1aa99c572a0"). 
InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.449839 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "ed841687-cd89-4419-8726-85e086a5cc21" (UID: "ed841687-cd89-4419-8726-85e086a5cc21"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.462159 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-kube-api-access-8jt69" (OuterVolumeSpecName: "kube-api-access-8jt69") pod "ed841687-cd89-4419-8726-85e086a5cc21" (UID: "ed841687-cd89-4419-8726-85e086a5cc21"). InnerVolumeSpecName "kube-api-access-8jt69". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.462256 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/ed841687-cd89-4419-8726-85e086a5cc21-pod-info" (OuterVolumeSpecName: "pod-info") pod "ed841687-cd89-4419-8726-85e086a5cc21" (UID: "ed841687-cd89-4419-8726-85e086a5cc21"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.472320 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-kube-api-access-cfpct" (OuterVolumeSpecName: "kube-api-access-cfpct") pod "83d70e0f-d672-49c8-89d6-c1aa99c572a0" (UID: "83d70e0f-d672-49c8-89d6-c1aa99c572a0"). InnerVolumeSpecName "kube-api-access-cfpct". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.512708 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "83d70e0f-d672-49c8-89d6-c1aa99c572a0" (UID: "83d70e0f-d672-49c8-89d6-c1aa99c572a0"). InnerVolumeSpecName "local-storage03-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.517619 4881 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.517654 4881 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ed841687-cd89-4419-8726-85e086a5cc21-pod-info\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.517664 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8jt69\" (UniqueName: \"kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-kube-api-access-8jt69\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.517673 4881 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/83d70e0f-d672-49c8-89d6-c1aa99c572a0-pod-info\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.517682 4881 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.517711 4881 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.517721 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfpct\" (UniqueName: \"kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-kube-api-access-cfpct\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.517730 4881 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.517738 4881 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ed841687-cd89-4419-8726-85e086a5cc21-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.517746 4881 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.517754 4881 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/83d70e0f-d672-49c8-89d6-c1aa99c572a0-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.517763 4881 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.517772 4881 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-tls\") on node \"crc\" DevicePath 
\"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.517781 4881 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.517795 4881 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.517803 4881 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.532915 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-config-data" (OuterVolumeSpecName: "config-data") pod "ed841687-cd89-4419-8726-85e086a5cc21" (UID: "ed841687-cd89-4419-8726-85e086a5cc21"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.602397 4881 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.614116 4881 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.621678 4881 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.621703 4881 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.621712 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.638842 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-config-data" (OuterVolumeSpecName: "config-data") pod "83d70e0f-d672-49c8-89d6-c1aa99c572a0" (UID: "83d70e0f-d672-49c8-89d6-c1aa99c572a0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.672912 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-server-conf" (OuterVolumeSpecName: "server-conf") pod "ed841687-cd89-4419-8726-85e086a5cc21" (UID: "ed841687-cd89-4419-8726-85e086a5cc21"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.672913 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-server-conf" (OuterVolumeSpecName: "server-conf") pod "83d70e0f-d672-49c8-89d6-c1aa99c572a0" (UID: "83d70e0f-d672-49c8-89d6-c1aa99c572a0"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.728114 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.728149 4881 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ed841687-cd89-4419-8726-85e086a5cc21-server-conf\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.728157 4881 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/83d70e0f-d672-49c8-89d6-c1aa99c572a0-server-conf\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.768676 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "ed841687-cd89-4419-8726-85e086a5cc21" (UID: "ed841687-cd89-4419-8726-85e086a5cc21"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.797129 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "83d70e0f-d672-49c8-89d6-c1aa99c572a0" (UID: "83d70e0f-d672-49c8-89d6-c1aa99c572a0"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.830042 4881 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/83d70e0f-d672-49c8-89d6-c1aa99c572a0-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:24 crc kubenswrapper[4881]: I1211 08:43:24.830082 4881 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ed841687-cd89-4419-8726-85e086a5cc21-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.271689 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.271704 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.321155 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.341573 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.370590 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.388489 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.394437 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 08:43:25 crc kubenswrapper[4881]: E1211 08:43:25.408450 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83d70e0f-d672-49c8-89d6-c1aa99c572a0" containerName="rabbitmq" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.408498 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="83d70e0f-d672-49c8-89d6-c1aa99c572a0" containerName="rabbitmq" Dec 11 08:43:25 crc kubenswrapper[4881]: E1211 08:43:25.408690 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83d70e0f-d672-49c8-89d6-c1aa99c572a0" containerName="setup-container" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.408705 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="83d70e0f-d672-49c8-89d6-c1aa99c572a0" containerName="setup-container" Dec 11 08:43:25 crc kubenswrapper[4881]: E1211 08:43:25.408730 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed841687-cd89-4419-8726-85e086a5cc21" containerName="setup-container" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.408752 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed841687-cd89-4419-8726-85e086a5cc21" containerName="setup-container" Dec 11 08:43:25 crc kubenswrapper[4881]: E1211 08:43:25.408794 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed841687-cd89-4419-8726-85e086a5cc21" containerName="rabbitmq" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.408803 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed841687-cd89-4419-8726-85e086a5cc21" containerName="rabbitmq" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.409722 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed841687-cd89-4419-8726-85e086a5cc21" containerName="rabbitmq" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.409786 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="83d70e0f-d672-49c8-89d6-c1aa99c572a0" containerName="rabbitmq" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.418153 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.418300 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.421927 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.422264 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-67jkl" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.422395 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.422472 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.422498 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.422395 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.422626 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.422675 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.424485 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.433068 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.433213 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.433305 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.433420 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.433440 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.433451 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-wcm7n" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.437307 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.437656 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.452405 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d52ebbc7-03f0-4f73-827b-8f8066e83146-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.452446 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/d52ebbc7-03f0-4f73-827b-8f8066e83146-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.452520 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fsj4k\" (UniqueName: \"kubernetes.io/projected/d52ebbc7-03f0-4f73-827b-8f8066e83146-kube-api-access-fsj4k\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.452746 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d52ebbc7-03f0-4f73-827b-8f8066e83146-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.452798 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d52ebbc7-03f0-4f73-827b-8f8066e83146-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.452905 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d52ebbc7-03f0-4f73-827b-8f8066e83146-config-data\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.452963 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d52ebbc7-03f0-4f73-827b-8f8066e83146-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.453057 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.453101 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d52ebbc7-03f0-4f73-827b-8f8066e83146-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.453259 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d52ebbc7-03f0-4f73-827b-8f8066e83146-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.453409 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d52ebbc7-03f0-4f73-827b-8f8066e83146-rabbitmq-erlang-cookie\") pod 
\"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.555528 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.555788 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.555837 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.555869 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d52ebbc7-03f0-4f73-827b-8f8066e83146-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.555889 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.555905 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d52ebbc7-03f0-4f73-827b-8f8066e83146-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.555927 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.556063 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d52ebbc7-03f0-4f73-827b-8f8066e83146-config-data\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.556100 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " 
pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.556121 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d52ebbc7-03f0-4f73-827b-8f8066e83146-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.556166 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.556187 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.556205 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d52ebbc7-03f0-4f73-827b-8f8066e83146-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.556248 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.556268 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctlmg\" (UniqueName: \"kubernetes.io/projected/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-kube-api-access-ctlmg\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.556308 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d52ebbc7-03f0-4f73-827b-8f8066e83146-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.556373 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.556409 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d52ebbc7-03f0-4f73-827b-8f8066e83146-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.556431 4881 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.556472 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d52ebbc7-03f0-4f73-827b-8f8066e83146-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.556494 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d52ebbc7-03f0-4f73-827b-8f8066e83146-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.556536 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fsj4k\" (UniqueName: \"kubernetes.io/projected/d52ebbc7-03f0-4f73-827b-8f8066e83146-kube-api-access-fsj4k\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.557130 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d52ebbc7-03f0-4f73-827b-8f8066e83146-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.557596 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d52ebbc7-03f0-4f73-827b-8f8066e83146-config-data\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.557639 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.558248 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d52ebbc7-03f0-4f73-827b-8f8066e83146-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.558606 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d52ebbc7-03f0-4f73-827b-8f8066e83146-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.558770 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d52ebbc7-03f0-4f73-827b-8f8066e83146-plugins-conf\") pod \"rabbitmq-server-0\" (UID: 
\"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.562585 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d52ebbc7-03f0-4f73-827b-8f8066e83146-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.564940 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d52ebbc7-03f0-4f73-827b-8f8066e83146-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.571984 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d52ebbc7-03f0-4f73-827b-8f8066e83146-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.572537 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d52ebbc7-03f0-4f73-827b-8f8066e83146-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.575079 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fsj4k\" (UniqueName: \"kubernetes.io/projected/d52ebbc7-03f0-4f73-827b-8f8066e83146-kube-api-access-fsj4k\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.628730 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"d52ebbc7-03f0-4f73-827b-8f8066e83146\") " pod="openstack/rabbitmq-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.658650 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.658963 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.659150 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.659156 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.659284 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.659324 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.659508 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.659598 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.659665 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.659691 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctlmg\" (UniqueName: \"kubernetes.io/projected/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-kube-api-access-ctlmg\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.659791 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.659830 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.660282 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.660917 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.660971 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.661266 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.661862 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.665095 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.665838 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.666050 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.669132 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.680441 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctlmg\" (UniqueName: \"kubernetes.io/projected/acc39512-3a6a-4e4c-a2a2-a13ad13b11f0-kube-api-access-ctlmg\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.704058 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.758640 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:43:25 crc kubenswrapper[4881]: I1211 08:43:25.776594 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 11 08:43:26 crc kubenswrapper[4881]: I1211 08:43:26.843604 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-68df85789f-jzdqb"] Dec 11 08:43:26 crc kubenswrapper[4881]: I1211 08:43:26.845771 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:26 crc kubenswrapper[4881]: I1211 08:43:26.847551 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Dec 11 08:43:26 crc kubenswrapper[4881]: I1211 08:43:26.866060 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-jzdqb"] Dec 11 08:43:26 crc kubenswrapper[4881]: I1211 08:43:26.990495 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:26 crc kubenswrapper[4881]: I1211 08:43:26.990567 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-config\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:26 crc kubenswrapper[4881]: I1211 08:43:26.990601 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:26 crc kubenswrapper[4881]: I1211 08:43:26.990648 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:26 crc kubenswrapper[4881]: I1211 08:43:26.990838 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-dns-svc\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:26 crc kubenswrapper[4881]: I1211 08:43:26.991124 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qj4vw\" (UniqueName: 
\"kubernetes.io/projected/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-kube-api-access-qj4vw\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:26 crc kubenswrapper[4881]: I1211 08:43:26.991278 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.024959 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83d70e0f-d672-49c8-89d6-c1aa99c572a0" path="/var/lib/kubelet/pods/83d70e0f-d672-49c8-89d6-c1aa99c572a0/volumes" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.026499 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed841687-cd89-4419-8726-85e086a5cc21" path="/var/lib/kubelet/pods/ed841687-cd89-4419-8726-85e086a5cc21/volumes" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.093921 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qj4vw\" (UniqueName: \"kubernetes.io/projected/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-kube-api-access-qj4vw\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.094030 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.094099 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.094141 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-config\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.094169 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.094195 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.094234 4881 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-dns-svc\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.095380 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.095398 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.095423 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-config\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.095591 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.095646 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.095915 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-dns-svc\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.125161 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qj4vw\" (UniqueName: \"kubernetes.io/projected/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-kube-api-access-qj4vw\") pod \"dnsmasq-dns-68df85789f-jzdqb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.166016 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.642731 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="ed841687-cd89-4419-8726-85e086a5cc21" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: i/o timeout" Dec 11 08:43:27 crc kubenswrapper[4881]: I1211 08:43:27.939462 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="83d70e0f-d672-49c8-89d6-c1aa99c572a0" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: i/o timeout" Dec 11 08:43:29 crc kubenswrapper[4881]: I1211 08:43:29.396599 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:43:29 crc kubenswrapper[4881]: I1211 08:43:29.397085 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:43:29 crc kubenswrapper[4881]: I1211 08:43:29.397132 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:43:29 crc kubenswrapper[4881]: I1211 08:43:29.398093 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 08:43:29 crc kubenswrapper[4881]: I1211 08:43:29.398149 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9" gracePeriod=600 Dec 11 08:43:32 crc kubenswrapper[4881]: I1211 08:43:32.637132 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9" exitCode=0 Dec 11 08:43:32 crc kubenswrapper[4881]: I1211 08:43:32.637421 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9"} Dec 11 08:43:33 crc kubenswrapper[4881]: I1211 08:43:33.655127 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-grrwm"] Dec 11 08:43:33 crc kubenswrapper[4881]: I1211 08:43:33.658454 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-grrwm" Dec 11 08:43:33 crc kubenswrapper[4881]: I1211 08:43:33.659307 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c312c92-31ea-4058-a68a-8c512b40ecd1-utilities\") pod \"certified-operators-grrwm\" (UID: \"9c312c92-31ea-4058-a68a-8c512b40ecd1\") " pod="openshift-marketplace/certified-operators-grrwm" Dec 11 08:43:33 crc kubenswrapper[4881]: I1211 08:43:33.659559 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c312c92-31ea-4058-a68a-8c512b40ecd1-catalog-content\") pod \"certified-operators-grrwm\" (UID: \"9c312c92-31ea-4058-a68a-8c512b40ecd1\") " pod="openshift-marketplace/certified-operators-grrwm" Dec 11 08:43:33 crc kubenswrapper[4881]: I1211 08:43:33.659755 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hc2dd\" (UniqueName: \"kubernetes.io/projected/9c312c92-31ea-4058-a68a-8c512b40ecd1-kube-api-access-hc2dd\") pod \"certified-operators-grrwm\" (UID: \"9c312c92-31ea-4058-a68a-8c512b40ecd1\") " pod="openshift-marketplace/certified-operators-grrwm" Dec 11 08:43:33 crc kubenswrapper[4881]: I1211 08:43:33.670796 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-grrwm"] Dec 11 08:43:33 crc kubenswrapper[4881]: I1211 08:43:33.762371 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c312c92-31ea-4058-a68a-8c512b40ecd1-catalog-content\") pod \"certified-operators-grrwm\" (UID: \"9c312c92-31ea-4058-a68a-8c512b40ecd1\") " pod="openshift-marketplace/certified-operators-grrwm" Dec 11 08:43:33 crc kubenswrapper[4881]: I1211 08:43:33.762802 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hc2dd\" (UniqueName: \"kubernetes.io/projected/9c312c92-31ea-4058-a68a-8c512b40ecd1-kube-api-access-hc2dd\") pod \"certified-operators-grrwm\" (UID: \"9c312c92-31ea-4058-a68a-8c512b40ecd1\") " pod="openshift-marketplace/certified-operators-grrwm" Dec 11 08:43:33 crc kubenswrapper[4881]: I1211 08:43:33.762805 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c312c92-31ea-4058-a68a-8c512b40ecd1-catalog-content\") pod \"certified-operators-grrwm\" (UID: \"9c312c92-31ea-4058-a68a-8c512b40ecd1\") " pod="openshift-marketplace/certified-operators-grrwm" Dec 11 08:43:33 crc kubenswrapper[4881]: I1211 08:43:33.763475 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c312c92-31ea-4058-a68a-8c512b40ecd1-utilities\") pod \"certified-operators-grrwm\" (UID: \"9c312c92-31ea-4058-a68a-8c512b40ecd1\") " pod="openshift-marketplace/certified-operators-grrwm" Dec 11 08:43:33 crc kubenswrapper[4881]: I1211 08:43:33.763589 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c312c92-31ea-4058-a68a-8c512b40ecd1-utilities\") pod \"certified-operators-grrwm\" (UID: \"9c312c92-31ea-4058-a68a-8c512b40ecd1\") " pod="openshift-marketplace/certified-operators-grrwm" Dec 11 08:43:33 crc kubenswrapper[4881]: I1211 08:43:33.779416 4881 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-hc2dd\" (UniqueName: \"kubernetes.io/projected/9c312c92-31ea-4058-a68a-8c512b40ecd1-kube-api-access-hc2dd\") pod \"certified-operators-grrwm\" (UID: \"9c312c92-31ea-4058-a68a-8c512b40ecd1\") " pod="openshift-marketplace/certified-operators-grrwm" Dec 11 08:43:33 crc kubenswrapper[4881]: I1211 08:43:33.991757 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-grrwm" Dec 11 08:43:42 crc kubenswrapper[4881]: I1211 08:43:42.901773 4881 scope.go:117] "RemoveContainer" containerID="d7d9878db78b7c7b900f17e224a18ac7b574b29f36904ba9edfbf03574b307d5" Dec 11 08:43:42 crc kubenswrapper[4881]: I1211 08:43:42.943123 4881 scope.go:117] "RemoveContainer" containerID="b76ca59b759174d4b3b68dec243d1e27ea0c579c0ee348a6d617483e4567b5ce" Dec 11 08:43:42 crc kubenswrapper[4881]: E1211 08:43:42.946108 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 11 08:43:42 crc kubenswrapper[4881]: E1211 08:43:42.946152 4881 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 11 08:43:42 crc kubenswrapper[4881]: E1211 08:43:42.946278 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-n7h4g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start 
failed in pod heat-db-sync-mx8mp_openstack(f6950faa-9468-4d23-9834-73fb64506367): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 08:43:42 crc kubenswrapper[4881]: E1211 08:43:42.947834 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-mx8mp" podUID="f6950faa-9468-4d23-9834-73fb64506367" Dec 11 08:43:43 crc kubenswrapper[4881]: I1211 08:43:43.059092 4881 scope.go:117] "RemoveContainer" containerID="94c3019cf47f713b5a44bc81dd351be9c696fd6a3b9a6a5da9c12b249515fa87" Dec 11 08:43:43 crc kubenswrapper[4881]: I1211 08:43:43.079201 4881 scope.go:117] "RemoveContainer" containerID="080f4092c28abc98f3cd45e00f973d275bf68faf0a43fb8d189c7bc8955176a2" Dec 11 08:43:43 crc kubenswrapper[4881]: I1211 08:43:43.385011 4881 scope.go:117] "RemoveContainer" containerID="a515bd066db37a4e28461e603c6dd047107539c1d10350095f8c9ca546f164e8" Dec 11 08:43:43 crc kubenswrapper[4881]: E1211 08:43:43.480868 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:43:43 crc kubenswrapper[4881]: I1211 08:43:43.594899 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 08:43:43 crc kubenswrapper[4881]: W1211 08:43:43.641193 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podacc39512_3a6a_4e4c_a2a2_a13ad13b11f0.slice/crio-f3fa602fd4db291b26c2c702c3b41cea5a50f8ebee679bc5f40877318581f3fe WatchSource:0}: Error finding container f3fa602fd4db291b26c2c702c3b41cea5a50f8ebee679bc5f40877318581f3fe: Status 404 returned error can't find the container with id f3fa602fd4db291b26c2c702c3b41cea5a50f8ebee679bc5f40877318581f3fe Dec 11 08:43:43 crc kubenswrapper[4881]: I1211 08:43:43.771400 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-grrwm"] Dec 11 08:43:43 crc kubenswrapper[4881]: I1211 08:43:43.785729 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-grrwm" event={"ID":"9c312c92-31ea-4058-a68a-8c512b40ecd1","Type":"ContainerStarted","Data":"3105f314bc78be0f8b0abfc2ffcd01151b9427e62f734c5fe0b5f34b15b43040"} Dec 11 08:43:43 crc kubenswrapper[4881]: I1211 08:43:43.787191 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0","Type":"ContainerStarted","Data":"f3fa602fd4db291b26c2c702c3b41cea5a50f8ebee679bc5f40877318581f3fe"} Dec 11 08:43:43 crc kubenswrapper[4881]: I1211 08:43:43.790023 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d52ebbc7-03f0-4f73-827b-8f8066e83146","Type":"ContainerStarted","Data":"ff6269ee6d396f93b02619198a752df3d51c50e4fa16b9902223b401d4b98d99"} Dec 11 08:43:43 crc kubenswrapper[4881]: I1211 08:43:43.798363 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9" 
Dec 11 08:43:43 crc kubenswrapper[4881]: E1211 08:43:43.798714 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:43:43 crc kubenswrapper[4881]: I1211 08:43:43.802425 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 08:43:43 crc kubenswrapper[4881]: E1211 08:43:43.810113 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-mx8mp" podUID="f6950faa-9468-4d23-9834-73fb64506367" Dec 11 08:43:43 crc kubenswrapper[4881]: I1211 08:43:43.977413 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-jzdqb"] Dec 11 08:43:44 crc kubenswrapper[4881]: I1211 08:43:44.817616 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-jzdqb" event={"ID":"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb","Type":"ContainerStarted","Data":"02f8e9b41f7752e19158fa715d8dc673670b083933657e48ec27c366387d4f3d"} Dec 11 08:43:44 crc kubenswrapper[4881]: I1211 08:43:44.819959 4881 generic.go:334] "Generic (PLEG): container finished" podID="9c312c92-31ea-4058-a68a-8c512b40ecd1" containerID="72d1dc2c98e64f523a5745f2c2591b06edcf5d04adae77d22a52c46b8841f0df" exitCode=0 Dec 11 08:43:44 crc kubenswrapper[4881]: I1211 08:43:44.820002 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-grrwm" event={"ID":"9c312c92-31ea-4058-a68a-8c512b40ecd1","Type":"ContainerDied","Data":"72d1dc2c98e64f523a5745f2c2591b06edcf5d04adae77d22a52c46b8841f0df"} Dec 11 08:43:45 crc kubenswrapper[4881]: I1211 08:43:45.833649 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0","Type":"ContainerStarted","Data":"d7472def6224df6184e1faf5d9a10ab83871f4aa957b91b569c5ac6d1d8306d2"} Dec 11 08:43:45 crc kubenswrapper[4881]: I1211 08:43:45.836969 4881 generic.go:334] "Generic (PLEG): container finished" podID="bd4bde03-43c2-4be2-a95c-c3171e9c2dbb" containerID="271251342d6d3c7660d019f962cd1be8b8959141d473958f288468a70fbd5991" exitCode=0 Dec 11 08:43:45 crc kubenswrapper[4881]: I1211 08:43:45.837039 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-jzdqb" event={"ID":"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb","Type":"ContainerDied","Data":"271251342d6d3c7660d019f962cd1be8b8959141d473958f288468a70fbd5991"} Dec 11 08:43:46 crc kubenswrapper[4881]: I1211 08:43:46.858131 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d52ebbc7-03f0-4f73-827b-8f8066e83146","Type":"ContainerStarted","Data":"6d3ddabb3dc74a0d0d8f900d4c5b0d1545dccbbb4986242ee6f13d27f36d6bb5"} Dec 11 08:43:50 crc kubenswrapper[4881]: I1211 08:43:50.925555 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-jzdqb" 
event={"ID":"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb","Type":"ContainerStarted","Data":"9dc65edadd690932884193e8523c34e69e6af4875e9656e51aebec7803c9408e"} Dec 11 08:43:50 crc kubenswrapper[4881]: I1211 08:43:50.926103 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:51 crc kubenswrapper[4881]: I1211 08:43:51.951963 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b51fa237-35ec-47d6-b61d-c3e50dc8450f","Type":"ContainerStarted","Data":"cf45a41050248534f1555c709e7e7e77a21c6fc6cbcf8d38c25cf5146070596f"} Dec 11 08:43:52 crc kubenswrapper[4881]: I1211 08:43:52.966836 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-grrwm" event={"ID":"9c312c92-31ea-4058-a68a-8c512b40ecd1","Type":"ContainerStarted","Data":"409a7b69ec180cfe52d183933482c661d00615b699e81fbd86cc02ea15fde517"} Dec 11 08:43:52 crc kubenswrapper[4881]: I1211 08:43:52.998126 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-68df85789f-jzdqb" podStartSLOduration=26.998104723 podStartE2EDuration="26.998104723s" podCreationTimestamp="2025-12-11 08:43:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:43:50.947192418 +0000 UTC m=+1679.324561115" watchObservedRunningTime="2025-12-11 08:43:52.998104723 +0000 UTC m=+1681.375473420" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.167509 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.310271 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-gr9md"] Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.310762 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" podUID="f2376e63-11dc-4d35-be7c-eff1af8f8534" containerName="dnsmasq-dns" containerID="cri-o://c9cdc438b161a719a0ae7bd63f2b369f4154f6de71f70fb5542982d0dac402e1" gracePeriod=10 Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.469260 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-xpw6s"] Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.478312 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.496273 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-xpw6s"] Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.540360 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.540461 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-config\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.540511 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-dns-swift-storage-0\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.540588 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-ovsdbserver-nb\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.540615 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kv8c9\" (UniqueName: \"kubernetes.io/projected/ff97623d-cd72-4130-b08b-aa41fb1f3e55-kube-api-access-kv8c9\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.540659 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.540938 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-dns-svc\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.644380 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-dns-svc\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.644675 4881 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.644770 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-config\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.644831 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-dns-swift-storage-0\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.644911 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kv8c9\" (UniqueName: \"kubernetes.io/projected/ff97623d-cd72-4130-b08b-aa41fb1f3e55-kube-api-access-kv8c9\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.644932 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-ovsdbserver-nb\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.644993 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.645964 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-ovsdbserver-nb\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.646024 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-config\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.646138 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-dns-swift-storage-0\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.646383 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-dns-svc\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.646569 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.646996 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff97623d-cd72-4130-b08b-aa41fb1f3e55-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.679449 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kv8c9\" (UniqueName: \"kubernetes.io/projected/ff97623d-cd72-4130-b08b-aa41fb1f3e55-kube-api-access-kv8c9\") pod \"dnsmasq-dns-bb85b8995-xpw6s\" (UID: \"ff97623d-cd72-4130-b08b-aa41fb1f3e55\") " pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:57 crc kubenswrapper[4881]: I1211 08:43:57.833108 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:43:58 crc kubenswrapper[4881]: I1211 08:43:58.005816 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9" Dec 11 08:43:58 crc kubenswrapper[4881]: E1211 08:43:58.006373 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:43:58 crc kubenswrapper[4881]: I1211 08:43:58.023388 4881 generic.go:334] "Generic (PLEG): container finished" podID="9c312c92-31ea-4058-a68a-8c512b40ecd1" containerID="409a7b69ec180cfe52d183933482c661d00615b699e81fbd86cc02ea15fde517" exitCode=0 Dec 11 08:43:58 crc kubenswrapper[4881]: I1211 08:43:58.023430 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-grrwm" event={"ID":"9c312c92-31ea-4058-a68a-8c512b40ecd1","Type":"ContainerDied","Data":"409a7b69ec180cfe52d183933482c661d00615b699e81fbd86cc02ea15fde517"} Dec 11 08:43:58 crc kubenswrapper[4881]: I1211 08:43:58.317135 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" podUID="f2376e63-11dc-4d35-be7c-eff1af8f8534" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.250:5353: connect: connection refused" Dec 11 08:43:59 crc kubenswrapper[4881]: I1211 08:43:59.049801 4881 generic.go:334] "Generic (PLEG): container finished" podID="f2376e63-11dc-4d35-be7c-eff1af8f8534" containerID="c9cdc438b161a719a0ae7bd63f2b369f4154f6de71f70fb5542982d0dac402e1" exitCode=0 Dec 11 08:43:59 crc kubenswrapper[4881]: I1211 08:43:59.049860 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" 
event={"ID":"f2376e63-11dc-4d35-be7c-eff1af8f8534","Type":"ContainerDied","Data":"c9cdc438b161a719a0ae7bd63f2b369f4154f6de71f70fb5542982d0dac402e1"} Dec 11 08:43:59 crc kubenswrapper[4881]: I1211 08:43:59.824692 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-xpw6s"] Dec 11 08:44:00 crc kubenswrapper[4881]: I1211 08:44:00.062922 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" event={"ID":"ff97623d-cd72-4130-b08b-aa41fb1f3e55","Type":"ContainerStarted","Data":"6a03d4968a9f2b066fd1effb8dcfab86ef69ac145bf0059082ae489dc8fbb826"} Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.103512 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" event={"ID":"ff97623d-cd72-4130-b08b-aa41fb1f3e55","Type":"ContainerStarted","Data":"9a99debf56a9748d44ba09acdac2ff4725f002c0e5299f4a5ec95e118bb26a11"} Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.681465 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.772386 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-config\") pod \"f2376e63-11dc-4d35-be7c-eff1af8f8534\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.772790 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-dns-svc\") pod \"f2376e63-11dc-4d35-be7c-eff1af8f8534\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.772855 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-dns-swift-storage-0\") pod \"f2376e63-11dc-4d35-be7c-eff1af8f8534\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.772888 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4cp\" (UniqueName: \"kubernetes.io/projected/f2376e63-11dc-4d35-be7c-eff1af8f8534-kube-api-access-2d4cp\") pod \"f2376e63-11dc-4d35-be7c-eff1af8f8534\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.772940 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-ovsdbserver-nb\") pod \"f2376e63-11dc-4d35-be7c-eff1af8f8534\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.773008 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-ovsdbserver-sb\") pod \"f2376e63-11dc-4d35-be7c-eff1af8f8534\" (UID: \"f2376e63-11dc-4d35-be7c-eff1af8f8534\") " Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.786606 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2376e63-11dc-4d35-be7c-eff1af8f8534-kube-api-access-2d4cp" (OuterVolumeSpecName: "kube-api-access-2d4cp") pod 
"f2376e63-11dc-4d35-be7c-eff1af8f8534" (UID: "f2376e63-11dc-4d35-be7c-eff1af8f8534"). InnerVolumeSpecName "kube-api-access-2d4cp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.883783 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4cp\" (UniqueName: \"kubernetes.io/projected/f2376e63-11dc-4d35-be7c-eff1af8f8534-kube-api-access-2d4cp\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.884120 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f2376e63-11dc-4d35-be7c-eff1af8f8534" (UID: "f2376e63-11dc-4d35-be7c-eff1af8f8534"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.885984 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f2376e63-11dc-4d35-be7c-eff1af8f8534" (UID: "f2376e63-11dc-4d35-be7c-eff1af8f8534"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.887328 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f2376e63-11dc-4d35-be7c-eff1af8f8534" (UID: "f2376e63-11dc-4d35-be7c-eff1af8f8534"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.889728 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f2376e63-11dc-4d35-be7c-eff1af8f8534" (UID: "f2376e63-11dc-4d35-be7c-eff1af8f8534"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.890787 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-config" (OuterVolumeSpecName: "config") pod "f2376e63-11dc-4d35-be7c-eff1af8f8534" (UID: "f2376e63-11dc-4d35-be7c-eff1af8f8534"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.985773 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.985815 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.985858 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.985871 4881 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:02 crc kubenswrapper[4881]: I1211 08:44:02.985885 4881 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f2376e63-11dc-4d35-be7c-eff1af8f8534-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:03 crc kubenswrapper[4881]: I1211 08:44:03.118117 4881 generic.go:334] "Generic (PLEG): container finished" podID="ff97623d-cd72-4130-b08b-aa41fb1f3e55" containerID="9a99debf56a9748d44ba09acdac2ff4725f002c0e5299f4a5ec95e118bb26a11" exitCode=0 Dec 11 08:44:03 crc kubenswrapper[4881]: I1211 08:44:03.118196 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" event={"ID":"ff97623d-cd72-4130-b08b-aa41fb1f3e55","Type":"ContainerDied","Data":"9a99debf56a9748d44ba09acdac2ff4725f002c0e5299f4a5ec95e118bb26a11"} Dec 11 08:44:03 crc kubenswrapper[4881]: I1211 08:44:03.126660 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" event={"ID":"f2376e63-11dc-4d35-be7c-eff1af8f8534","Type":"ContainerDied","Data":"77ede655a7ec5d1d6a98f2caafcbf372c4edfcb1285571928b8721f6f0923062"} Dec 11 08:44:03 crc kubenswrapper[4881]: I1211 08:44:03.126721 4881 scope.go:117] "RemoveContainer" containerID="c9cdc438b161a719a0ae7bd63f2b369f4154f6de71f70fb5542982d0dac402e1" Dec 11 08:44:03 crc kubenswrapper[4881]: I1211 08:44:03.126732 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-gr9md" Dec 11 08:44:03 crc kubenswrapper[4881]: I1211 08:44:03.225418 4881 scope.go:117] "RemoveContainer" containerID="7b28f08e70b3b16c1b082a8725e98fb5b980250d5cb2fa5b93670a6fd2ba9b15" Dec 11 08:44:03 crc kubenswrapper[4881]: I1211 08:44:03.230208 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-gr9md"] Dec 11 08:44:03 crc kubenswrapper[4881]: I1211 08:44:03.252742 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-gr9md"] Dec 11 08:44:04 crc kubenswrapper[4881]: I1211 08:44:04.149690 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-grrwm" event={"ID":"9c312c92-31ea-4058-a68a-8c512b40ecd1","Type":"ContainerStarted","Data":"4ac8978a8e4c4f8a5cfd12cdaf4ae9daf1f35a19aad61efc1a4a52e6202a8e26"} Dec 11 08:44:04 crc kubenswrapper[4881]: I1211 08:44:04.155824 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" event={"ID":"ff97623d-cd72-4130-b08b-aa41fb1f3e55","Type":"ContainerStarted","Data":"09ace86581be08d8b1574f68a14ad626e90b9e31fc094f6861964d9c5dd8dd54"} Dec 11 08:44:04 crc kubenswrapper[4881]: I1211 08:44:04.156963 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:44:04 crc kubenswrapper[4881]: I1211 08:44:04.165056 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-mx8mp" event={"ID":"f6950faa-9468-4d23-9834-73fb64506367","Type":"ContainerStarted","Data":"0fe41f4153d771d21a6b9388ac26e7f7d7f07050854a3d1f9c085687af6a8561"} Dec 11 08:44:04 crc kubenswrapper[4881]: I1211 08:44:04.167170 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b51fa237-35ec-47d6-b61d-c3e50dc8450f","Type":"ContainerStarted","Data":"dc308c53082f1bccf6b5621438e9bf1d042c6989ccd99061fbf0bccd45eafb3e"} Dec 11 08:44:04 crc kubenswrapper[4881]: I1211 08:44:04.167198 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b51fa237-35ec-47d6-b61d-c3e50dc8450f","Type":"ContainerStarted","Data":"209edf400e1b5076a1d65b6d356ed69c2b2f0455f23d54776527dbc998fcb642"} Dec 11 08:44:04 crc kubenswrapper[4881]: I1211 08:44:04.176657 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-grrwm" podStartSLOduration=13.532258803 podStartE2EDuration="31.176639817s" podCreationTimestamp="2025-12-11 08:43:33 +0000 UTC" firstStartedPulling="2025-12-11 08:43:44.82219667 +0000 UTC m=+1673.199565367" lastFinishedPulling="2025-12-11 08:44:02.466577684 +0000 UTC m=+1690.843946381" observedRunningTime="2025-12-11 08:44:04.175574441 +0000 UTC m=+1692.552943158" watchObservedRunningTime="2025-12-11 08:44:04.176639817 +0000 UTC m=+1692.554008514" Dec 11 08:44:04 crc kubenswrapper[4881]: I1211 08:44:04.221446 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" podStartSLOduration=7.221408682 podStartE2EDuration="7.221408682s" podCreationTimestamp="2025-12-11 08:43:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:44:04.198290201 +0000 UTC m=+1692.575658898" watchObservedRunningTime="2025-12-11 08:44:04.221408682 +0000 UTC m=+1692.598777379" Dec 11 08:44:04 crc kubenswrapper[4881]: I1211 
08:44:04.236522 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-mx8mp" podStartSLOduration=3.7939847909999997 podStartE2EDuration="1m3.236501814s" podCreationTimestamp="2025-12-11 08:43:01 +0000 UTC" firstStartedPulling="2025-12-11 08:43:02.916755183 +0000 UTC m=+1631.294123880" lastFinishedPulling="2025-12-11 08:44:02.359272206 +0000 UTC m=+1690.736640903" observedRunningTime="2025-12-11 08:44:04.215024034 +0000 UTC m=+1692.592392731" watchObservedRunningTime="2025-12-11 08:44:04.236501814 +0000 UTC m=+1692.613870521" Dec 11 08:44:05 crc kubenswrapper[4881]: I1211 08:44:05.020248 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2376e63-11dc-4d35-be7c-eff1af8f8534" path="/var/lib/kubelet/pods/f2376e63-11dc-4d35-be7c-eff1af8f8534/volumes" Dec 11 08:44:07 crc kubenswrapper[4881]: I1211 08:44:07.205555 4881 generic.go:334] "Generic (PLEG): container finished" podID="f6950faa-9468-4d23-9834-73fb64506367" containerID="0fe41f4153d771d21a6b9388ac26e7f7d7f07050854a3d1f9c085687af6a8561" exitCode=0 Dec 11 08:44:07 crc kubenswrapper[4881]: I1211 08:44:07.206250 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-mx8mp" event={"ID":"f6950faa-9468-4d23-9834-73fb64506367","Type":"ContainerDied","Data":"0fe41f4153d771d21a6b9388ac26e7f7d7f07050854a3d1f9c085687af6a8561"} Dec 11 08:44:07 crc kubenswrapper[4881]: I1211 08:44:07.210087 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b51fa237-35ec-47d6-b61d-c3e50dc8450f","Type":"ContainerStarted","Data":"30e2d26e88766fefd47e619144d5c45184ec7befcf8a70551180dafcb5622e15"} Dec 11 08:44:07 crc kubenswrapper[4881]: I1211 08:44:07.211006 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 11 08:44:07 crc kubenswrapper[4881]: I1211 08:44:07.267858 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=14.849744802 podStartE2EDuration="57.267837644s" podCreationTimestamp="2025-12-11 08:43:10 +0000 UTC" firstStartedPulling="2025-12-11 08:43:24.018837296 +0000 UTC m=+1652.396205993" lastFinishedPulling="2025-12-11 08:44:06.436930138 +0000 UTC m=+1694.814298835" observedRunningTime="2025-12-11 08:44:07.263285691 +0000 UTC m=+1695.640654388" watchObservedRunningTime="2025-12-11 08:44:07.267837644 +0000 UTC m=+1695.645206341" Dec 11 08:44:08 crc kubenswrapper[4881]: I1211 08:44:08.678974 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-mx8mp" Dec 11 08:44:08 crc kubenswrapper[4881]: I1211 08:44:08.831555 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n7h4g\" (UniqueName: \"kubernetes.io/projected/f6950faa-9468-4d23-9834-73fb64506367-kube-api-access-n7h4g\") pod \"f6950faa-9468-4d23-9834-73fb64506367\" (UID: \"f6950faa-9468-4d23-9834-73fb64506367\") " Dec 11 08:44:08 crc kubenswrapper[4881]: I1211 08:44:08.831855 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6950faa-9468-4d23-9834-73fb64506367-config-data\") pod \"f6950faa-9468-4d23-9834-73fb64506367\" (UID: \"f6950faa-9468-4d23-9834-73fb64506367\") " Dec 11 08:44:08 crc kubenswrapper[4881]: I1211 08:44:08.832067 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6950faa-9468-4d23-9834-73fb64506367-combined-ca-bundle\") pod \"f6950faa-9468-4d23-9834-73fb64506367\" (UID: \"f6950faa-9468-4d23-9834-73fb64506367\") " Dec 11 08:44:08 crc kubenswrapper[4881]: I1211 08:44:08.852622 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6950faa-9468-4d23-9834-73fb64506367-kube-api-access-n7h4g" (OuterVolumeSpecName: "kube-api-access-n7h4g") pod "f6950faa-9468-4d23-9834-73fb64506367" (UID: "f6950faa-9468-4d23-9834-73fb64506367"). InnerVolumeSpecName "kube-api-access-n7h4g". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:44:08 crc kubenswrapper[4881]: I1211 08:44:08.912581 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6950faa-9468-4d23-9834-73fb64506367-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f6950faa-9468-4d23-9834-73fb64506367" (UID: "f6950faa-9468-4d23-9834-73fb64506367"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:44:08 crc kubenswrapper[4881]: I1211 08:44:08.935010 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n7h4g\" (UniqueName: \"kubernetes.io/projected/f6950faa-9468-4d23-9834-73fb64506367-kube-api-access-n7h4g\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:08 crc kubenswrapper[4881]: I1211 08:44:08.935041 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6950faa-9468-4d23-9834-73fb64506367-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:09 crc kubenswrapper[4881]: I1211 08:44:09.003050 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6950faa-9468-4d23-9834-73fb64506367-config-data" (OuterVolumeSpecName: "config-data") pod "f6950faa-9468-4d23-9834-73fb64506367" (UID: "f6950faa-9468-4d23-9834-73fb64506367"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:44:09 crc kubenswrapper[4881]: I1211 08:44:09.037131 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6950faa-9468-4d23-9834-73fb64506367-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:09 crc kubenswrapper[4881]: I1211 08:44:09.233101 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-mx8mp" event={"ID":"f6950faa-9468-4d23-9834-73fb64506367","Type":"ContainerDied","Data":"b1e7a68be8157b59216e078baa842164bb8a25ee7e7e1e876cbef5c690fdbe84"} Dec 11 08:44:09 crc kubenswrapper[4881]: I1211 08:44:09.233553 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1e7a68be8157b59216e078baa842164bb8a25ee7e7e1e876cbef5c690fdbe84" Dec 11 08:44:09 crc kubenswrapper[4881]: I1211 08:44:09.233156 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-mx8mp" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.280234 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-559447f984-4bfr5"] Dec 11 08:44:10 crc kubenswrapper[4881]: E1211 08:44:10.280988 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2376e63-11dc-4d35-be7c-eff1af8f8534" containerName="dnsmasq-dns" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.281000 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2376e63-11dc-4d35-be7c-eff1af8f8534" containerName="dnsmasq-dns" Dec 11 08:44:10 crc kubenswrapper[4881]: E1211 08:44:10.281046 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6950faa-9468-4d23-9834-73fb64506367" containerName="heat-db-sync" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.281052 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6950faa-9468-4d23-9834-73fb64506367" containerName="heat-db-sync" Dec 11 08:44:10 crc kubenswrapper[4881]: E1211 08:44:10.281074 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2376e63-11dc-4d35-be7c-eff1af8f8534" containerName="init" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.281080 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2376e63-11dc-4d35-be7c-eff1af8f8534" containerName="init" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.281321 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2376e63-11dc-4d35-be7c-eff1af8f8534" containerName="dnsmasq-dns" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.281354 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6950faa-9468-4d23-9834-73fb64506367" containerName="heat-db-sync" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.282179 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-559447f984-4bfr5" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.317599 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-559447f984-4bfr5"] Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.334604 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-5f8f5d98d5-xmfmx"] Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.336870 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.358881 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-77bc5fff7b-clttx"] Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.360578 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.369400 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b4ff921-94ce-4083-ad5d-783a59c7fb4d-combined-ca-bundle\") pod \"heat-engine-559447f984-4bfr5\" (UID: \"6b4ff921-94ce-4083-ad5d-783a59c7fb4d\") " pod="openstack/heat-engine-559447f984-4bfr5" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.369491 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nqcv\" (UniqueName: \"kubernetes.io/projected/6b4ff921-94ce-4083-ad5d-783a59c7fb4d-kube-api-access-8nqcv\") pod \"heat-engine-559447f984-4bfr5\" (UID: \"6b4ff921-94ce-4083-ad5d-783a59c7fb4d\") " pod="openstack/heat-engine-559447f984-4bfr5" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.369588 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b4ff921-94ce-4083-ad5d-783a59c7fb4d-config-data\") pod \"heat-engine-559447f984-4bfr5\" (UID: \"6b4ff921-94ce-4083-ad5d-783a59c7fb4d\") " pod="openstack/heat-engine-559447f984-4bfr5" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.369660 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6b4ff921-94ce-4083-ad5d-783a59c7fb4d-config-data-custom\") pod \"heat-engine-559447f984-4bfr5\" (UID: \"6b4ff921-94ce-4083-ad5d-783a59c7fb4d\") " pod="openstack/heat-engine-559447f984-4bfr5" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.402894 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-77bc5fff7b-clttx"] Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.418411 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5f8f5d98d5-xmfmx"] Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.472054 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b4ff921-94ce-4083-ad5d-783a59c7fb4d-combined-ca-bundle\") pod \"heat-engine-559447f984-4bfr5\" (UID: \"6b4ff921-94ce-4083-ad5d-783a59c7fb4d\") " pod="openstack/heat-engine-559447f984-4bfr5" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.472138 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/059dc22b-b46b-482a-9a29-ded125bc4dac-config-data-custom\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" (UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.472168 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/916eca8d-ca13-4db2-a350-b39a66bdee84-config-data\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " 
pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.472210 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nqcv\" (UniqueName: \"kubernetes.io/projected/6b4ff921-94ce-4083-ad5d-783a59c7fb4d-kube-api-access-8nqcv\") pod \"heat-engine-559447f984-4bfr5\" (UID: \"6b4ff921-94ce-4083-ad5d-783a59c7fb4d\") " pod="openstack/heat-engine-559447f984-4bfr5" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.472229 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/916eca8d-ca13-4db2-a350-b39a66bdee84-public-tls-certs\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.472262 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pzrv\" (UniqueName: \"kubernetes.io/projected/916eca8d-ca13-4db2-a350-b39a66bdee84-kube-api-access-7pzrv\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.472285 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhd5f\" (UniqueName: \"kubernetes.io/projected/059dc22b-b46b-482a-9a29-ded125bc4dac-kube-api-access-bhd5f\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" (UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.472314 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/916eca8d-ca13-4db2-a350-b39a66bdee84-config-data-custom\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.472364 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/059dc22b-b46b-482a-9a29-ded125bc4dac-config-data\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" (UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.472386 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/059dc22b-b46b-482a-9a29-ded125bc4dac-internal-tls-certs\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" (UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.472407 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/059dc22b-b46b-482a-9a29-ded125bc4dac-combined-ca-bundle\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" (UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.472449 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/6b4ff921-94ce-4083-ad5d-783a59c7fb4d-config-data\") pod \"heat-engine-559447f984-4bfr5\" (UID: \"6b4ff921-94ce-4083-ad5d-783a59c7fb4d\") " pod="openstack/heat-engine-559447f984-4bfr5" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.472485 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/916eca8d-ca13-4db2-a350-b39a66bdee84-internal-tls-certs\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.472504 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/916eca8d-ca13-4db2-a350-b39a66bdee84-combined-ca-bundle\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.472536 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/059dc22b-b46b-482a-9a29-ded125bc4dac-public-tls-certs\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" (UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.472573 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6b4ff921-94ce-4083-ad5d-783a59c7fb4d-config-data-custom\") pod \"heat-engine-559447f984-4bfr5\" (UID: \"6b4ff921-94ce-4083-ad5d-783a59c7fb4d\") " pod="openstack/heat-engine-559447f984-4bfr5" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.479202 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6b4ff921-94ce-4083-ad5d-783a59c7fb4d-config-data-custom\") pod \"heat-engine-559447f984-4bfr5\" (UID: \"6b4ff921-94ce-4083-ad5d-783a59c7fb4d\") " pod="openstack/heat-engine-559447f984-4bfr5" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.480169 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b4ff921-94ce-4083-ad5d-783a59c7fb4d-config-data\") pod \"heat-engine-559447f984-4bfr5\" (UID: \"6b4ff921-94ce-4083-ad5d-783a59c7fb4d\") " pod="openstack/heat-engine-559447f984-4bfr5" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.481072 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b4ff921-94ce-4083-ad5d-783a59c7fb4d-combined-ca-bundle\") pod \"heat-engine-559447f984-4bfr5\" (UID: \"6b4ff921-94ce-4083-ad5d-783a59c7fb4d\") " pod="openstack/heat-engine-559447f984-4bfr5" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.491542 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nqcv\" (UniqueName: \"kubernetes.io/projected/6b4ff921-94ce-4083-ad5d-783a59c7fb4d-kube-api-access-8nqcv\") pod \"heat-engine-559447f984-4bfr5\" (UID: \"6b4ff921-94ce-4083-ad5d-783a59c7fb4d\") " pod="openstack/heat-engine-559447f984-4bfr5" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.575139 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/916eca8d-ca13-4db2-a350-b39a66bdee84-internal-tls-certs\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.575191 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/916eca8d-ca13-4db2-a350-b39a66bdee84-combined-ca-bundle\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.575237 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/059dc22b-b46b-482a-9a29-ded125bc4dac-public-tls-certs\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" (UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.575404 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/059dc22b-b46b-482a-9a29-ded125bc4dac-config-data-custom\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" (UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.575433 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/916eca8d-ca13-4db2-a350-b39a66bdee84-config-data\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.575484 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/916eca8d-ca13-4db2-a350-b39a66bdee84-public-tls-certs\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.575526 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pzrv\" (UniqueName: \"kubernetes.io/projected/916eca8d-ca13-4db2-a350-b39a66bdee84-kube-api-access-7pzrv\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.576155 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhd5f\" (UniqueName: \"kubernetes.io/projected/059dc22b-b46b-482a-9a29-ded125bc4dac-kube-api-access-bhd5f\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" (UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.576223 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/916eca8d-ca13-4db2-a350-b39a66bdee84-config-data-custom\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.576268 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/059dc22b-b46b-482a-9a29-ded125bc4dac-config-data\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" (UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.576300 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/059dc22b-b46b-482a-9a29-ded125bc4dac-internal-tls-certs\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" (UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.576355 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/059dc22b-b46b-482a-9a29-ded125bc4dac-combined-ca-bundle\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" (UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.584162 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/059dc22b-b46b-482a-9a29-ded125bc4dac-combined-ca-bundle\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" (UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.584553 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/916eca8d-ca13-4db2-a350-b39a66bdee84-config-data-custom\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.584673 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/916eca8d-ca13-4db2-a350-b39a66bdee84-internal-tls-certs\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.585545 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/916eca8d-ca13-4db2-a350-b39a66bdee84-public-tls-certs\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.585573 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/916eca8d-ca13-4db2-a350-b39a66bdee84-config-data\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.588168 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/916eca8d-ca13-4db2-a350-b39a66bdee84-combined-ca-bundle\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.589992 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/059dc22b-b46b-482a-9a29-ded125bc4dac-public-tls-certs\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" 
(UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.590275 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/059dc22b-b46b-482a-9a29-ded125bc4dac-internal-tls-certs\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" (UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.593015 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/059dc22b-b46b-482a-9a29-ded125bc4dac-config-data-custom\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" (UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.593415 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pzrv\" (UniqueName: \"kubernetes.io/projected/916eca8d-ca13-4db2-a350-b39a66bdee84-kube-api-access-7pzrv\") pod \"heat-api-77bc5fff7b-clttx\" (UID: \"916eca8d-ca13-4db2-a350-b39a66bdee84\") " pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.599054 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhd5f\" (UniqueName: \"kubernetes.io/projected/059dc22b-b46b-482a-9a29-ded125bc4dac-kube-api-access-bhd5f\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" (UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.610502 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/059dc22b-b46b-482a-9a29-ded125bc4dac-config-data\") pod \"heat-cfnapi-5f8f5d98d5-xmfmx\" (UID: \"059dc22b-b46b-482a-9a29-ded125bc4dac\") " pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.612172 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-559447f984-4bfr5" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.673493 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:10 crc kubenswrapper[4881]: I1211 08:44:10.701716 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:11 crc kubenswrapper[4881]: I1211 08:44:11.008073 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9" Dec 11 08:44:11 crc kubenswrapper[4881]: E1211 08:44:11.011879 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:44:11 crc kubenswrapper[4881]: W1211 08:44:11.296817 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6b4ff921_94ce_4083_ad5d_783a59c7fb4d.slice/crio-0e6c1461dd1275f561b1cf21977ceeaa4ace2a20e43981a7819a8b615d30f813 WatchSource:0}: Error finding container 0e6c1461dd1275f561b1cf21977ceeaa4ace2a20e43981a7819a8b615d30f813: Status 404 returned error can't find the container with id 0e6c1461dd1275f561b1cf21977ceeaa4ace2a20e43981a7819a8b615d30f813 Dec 11 08:44:11 crc kubenswrapper[4881]: I1211 08:44:11.306040 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-559447f984-4bfr5"] Dec 11 08:44:11 crc kubenswrapper[4881]: I1211 08:44:11.404366 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-77bc5fff7b-clttx"] Dec 11 08:44:11 crc kubenswrapper[4881]: I1211 08:44:11.420325 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5f8f5d98d5-xmfmx"] Dec 11 08:44:11 crc kubenswrapper[4881]: W1211 08:44:11.425992 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod916eca8d_ca13_4db2_a350_b39a66bdee84.slice/crio-ff14d6b42572ad45c550f0194c3055169b9fff5c7e20b73bb27624cf6aa71b53 WatchSource:0}: Error finding container ff14d6b42572ad45c550f0194c3055169b9fff5c7e20b73bb27624cf6aa71b53: Status 404 returned error can't find the container with id ff14d6b42572ad45c550f0194c3055169b9fff5c7e20b73bb27624cf6aa71b53 Dec 11 08:44:11 crc kubenswrapper[4881]: W1211 08:44:11.455973 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod059dc22b_b46b_482a_9a29_ded125bc4dac.slice/crio-0cf313fbfd90aabd1ddc6c559aa25a968c1ddde48d35f8aba813217d47a1c421 WatchSource:0}: Error finding container 0cf313fbfd90aabd1ddc6c559aa25a968c1ddde48d35f8aba813217d47a1c421: Status 404 returned error can't find the container with id 0cf313fbfd90aabd1ddc6c559aa25a968c1ddde48d35f8aba813217d47a1c421 Dec 11 08:44:12 crc kubenswrapper[4881]: I1211 08:44:12.294226 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" event={"ID":"059dc22b-b46b-482a-9a29-ded125bc4dac","Type":"ContainerStarted","Data":"0cf313fbfd90aabd1ddc6c559aa25a968c1ddde48d35f8aba813217d47a1c421"} Dec 11 08:44:12 crc kubenswrapper[4881]: I1211 08:44:12.314386 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-559447f984-4bfr5" event={"ID":"6b4ff921-94ce-4083-ad5d-783a59c7fb4d","Type":"ContainerStarted","Data":"cd7dc878ddc72d153f7302d795116d1e36f13a0eb235d5892c432a4f9c954051"} Dec 11 08:44:12 crc kubenswrapper[4881]: I1211 08:44:12.314430 4881 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-559447f984-4bfr5" event={"ID":"6b4ff921-94ce-4083-ad5d-783a59c7fb4d","Type":"ContainerStarted","Data":"0e6c1461dd1275f561b1cf21977ceeaa4ace2a20e43981a7819a8b615d30f813"} Dec 11 08:44:12 crc kubenswrapper[4881]: I1211 08:44:12.314495 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-559447f984-4bfr5" Dec 11 08:44:12 crc kubenswrapper[4881]: I1211 08:44:12.318401 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-77bc5fff7b-clttx" event={"ID":"916eca8d-ca13-4db2-a350-b39a66bdee84","Type":"ContainerStarted","Data":"ff14d6b42572ad45c550f0194c3055169b9fff5c7e20b73bb27624cf6aa71b53"} Dec 11 08:44:12 crc kubenswrapper[4881]: I1211 08:44:12.331783 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-559447f984-4bfr5" podStartSLOduration=2.331763756 podStartE2EDuration="2.331763756s" podCreationTimestamp="2025-12-11 08:44:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:44:12.329540161 +0000 UTC m=+1700.706908858" watchObservedRunningTime="2025-12-11 08:44:12.331763756 +0000 UTC m=+1700.709132453" Dec 11 08:44:12 crc kubenswrapper[4881]: I1211 08:44:12.834516 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bb85b8995-xpw6s" Dec 11 08:44:12 crc kubenswrapper[4881]: I1211 08:44:12.904223 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-jzdqb"] Dec 11 08:44:12 crc kubenswrapper[4881]: I1211 08:44:12.904498 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-68df85789f-jzdqb" podUID="bd4bde03-43c2-4be2-a95c-c3171e9c2dbb" containerName="dnsmasq-dns" containerID="cri-o://9dc65edadd690932884193e8523c34e69e6af4875e9656e51aebec7803c9408e" gracePeriod=10 Dec 11 08:44:13 crc kubenswrapper[4881]: I1211 08:44:13.332473 4881 generic.go:334] "Generic (PLEG): container finished" podID="bd4bde03-43c2-4be2-a95c-c3171e9c2dbb" containerID="9dc65edadd690932884193e8523c34e69e6af4875e9656e51aebec7803c9408e" exitCode=0 Dec 11 08:44:13 crc kubenswrapper[4881]: I1211 08:44:13.332668 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-jzdqb" event={"ID":"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb","Type":"ContainerDied","Data":"9dc65edadd690932884193e8523c34e69e6af4875e9656e51aebec7803c9408e"} Dec 11 08:44:13 crc kubenswrapper[4881]: I1211 08:44:13.994016 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-grrwm" Dec 11 08:44:13 crc kubenswrapper[4881]: I1211 08:44:13.994359 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-grrwm" Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.055918 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-grrwm" Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.445819 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-grrwm" Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.508674 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-grrwm"] Dec 11 08:44:14 crc kubenswrapper[4881]: 
I1211 08:44:14.528863 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.610742 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-ovsdbserver-nb\") pod \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.610873 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-ovsdbserver-sb\") pod \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.610996 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-config\") pod \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.611136 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-openstack-edpm-ipam\") pod \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.611183 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-dns-svc\") pod \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.611291 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-dns-swift-storage-0\") pod \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.611329 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qj4vw\" (UniqueName: \"kubernetes.io/projected/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-kube-api-access-qj4vw\") pod \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\" (UID: \"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb\") " Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.621677 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-kube-api-access-qj4vw" (OuterVolumeSpecName: "kube-api-access-qj4vw") pod "bd4bde03-43c2-4be2-a95c-c3171e9c2dbb" (UID: "bd4bde03-43c2-4be2-a95c-c3171e9c2dbb"). InnerVolumeSpecName "kube-api-access-qj4vw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.687421 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "bd4bde03-43c2-4be2-a95c-c3171e9c2dbb" (UID: "bd4bde03-43c2-4be2-a95c-c3171e9c2dbb"). InnerVolumeSpecName "openstack-edpm-ipam". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.694734 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-config" (OuterVolumeSpecName: "config") pod "bd4bde03-43c2-4be2-a95c-c3171e9c2dbb" (UID: "bd4bde03-43c2-4be2-a95c-c3171e9c2dbb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.705518 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "bd4bde03-43c2-4be2-a95c-c3171e9c2dbb" (UID: "bd4bde03-43c2-4be2-a95c-c3171e9c2dbb"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.714044 4881 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.714076 4881 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.714085 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qj4vw\" (UniqueName: \"kubernetes.io/projected/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-kube-api-access-qj4vw\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.714096 4881 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-config\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.725441 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bd4bde03-43c2-4be2-a95c-c3171e9c2dbb" (UID: "bd4bde03-43c2-4be2-a95c-c3171e9c2dbb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.727275 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bd4bde03-43c2-4be2-a95c-c3171e9c2dbb" (UID: "bd4bde03-43c2-4be2-a95c-c3171e9c2dbb"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.736515 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bd4bde03-43c2-4be2-a95c-c3171e9c2dbb" (UID: "bd4bde03-43c2-4be2-a95c-c3171e9c2dbb"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.823409 4881 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.823461 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:14 crc kubenswrapper[4881]: I1211 08:44:14.823473 4881 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:15 crc kubenswrapper[4881]: I1211 08:44:15.358596 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" event={"ID":"059dc22b-b46b-482a-9a29-ded125bc4dac","Type":"ContainerStarted","Data":"347237a98e4410045808286f6c655f0e44126725fac3bb6e172775131ca2dbab"} Dec 11 08:44:15 crc kubenswrapper[4881]: I1211 08:44:15.359153 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:15 crc kubenswrapper[4881]: I1211 08:44:15.361386 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-77bc5fff7b-clttx" event={"ID":"916eca8d-ca13-4db2-a350-b39a66bdee84","Type":"ContainerStarted","Data":"8ed9d1361422adfd97ac17207a2daac97f0c8b7a108abd2047cf7cc59b8cfa89"} Dec 11 08:44:15 crc kubenswrapper[4881]: I1211 08:44:15.361444 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:15 crc kubenswrapper[4881]: I1211 08:44:15.400110 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-jzdqb" event={"ID":"bd4bde03-43c2-4be2-a95c-c3171e9c2dbb","Type":"ContainerDied","Data":"02f8e9b41f7752e19158fa715d8dc673670b083933657e48ec27c366387d4f3d"} Dec 11 08:44:15 crc kubenswrapper[4881]: I1211 08:44:15.400176 4881 scope.go:117] "RemoveContainer" containerID="9dc65edadd690932884193e8523c34e69e6af4875e9656e51aebec7803c9408e" Dec 11 08:44:15 crc kubenswrapper[4881]: I1211 08:44:15.402898 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-jzdqb" Dec 11 08:44:15 crc kubenswrapper[4881]: I1211 08:44:15.411891 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" podStartSLOduration=2.130513009 podStartE2EDuration="5.41187008s" podCreationTimestamp="2025-12-11 08:44:10 +0000 UTC" firstStartedPulling="2025-12-11 08:44:11.459365276 +0000 UTC m=+1699.836733973" lastFinishedPulling="2025-12-11 08:44:14.740722337 +0000 UTC m=+1703.118091044" observedRunningTime="2025-12-11 08:44:15.40011428 +0000 UTC m=+1703.777483007" watchObservedRunningTime="2025-12-11 08:44:15.41187008 +0000 UTC m=+1703.789238777" Dec 11 08:44:15 crc kubenswrapper[4881]: I1211 08:44:15.440725 4881 scope.go:117] "RemoveContainer" containerID="271251342d6d3c7660d019f962cd1be8b8959141d473958f288468a70fbd5991" Dec 11 08:44:15 crc kubenswrapper[4881]: I1211 08:44:15.461086 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-77bc5fff7b-clttx" podStartSLOduration=2.14674723 podStartE2EDuration="5.461062334s" podCreationTimestamp="2025-12-11 08:44:10 +0000 UTC" firstStartedPulling="2025-12-11 08:44:11.430859063 +0000 UTC m=+1699.808227760" lastFinishedPulling="2025-12-11 08:44:14.745174167 +0000 UTC m=+1703.122542864" observedRunningTime="2025-12-11 08:44:15.417486828 +0000 UTC m=+1703.794855545" watchObservedRunningTime="2025-12-11 08:44:15.461062334 +0000 UTC m=+1703.838431031" Dec 11 08:44:15 crc kubenswrapper[4881]: I1211 08:44:15.485557 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-jzdqb"] Dec 11 08:44:15 crc kubenswrapper[4881]: I1211 08:44:15.499276 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-jzdqb"] Dec 11 08:44:16 crc kubenswrapper[4881]: I1211 08:44:16.410390 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-grrwm" podUID="9c312c92-31ea-4058-a68a-8c512b40ecd1" containerName="registry-server" containerID="cri-o://4ac8978a8e4c4f8a5cfd12cdaf4ae9daf1f35a19aad61efc1a4a52e6202a8e26" gracePeriod=2 Dec 11 08:44:17 crc kubenswrapper[4881]: I1211 08:44:17.021263 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd4bde03-43c2-4be2-a95c-c3171e9c2dbb" path="/var/lib/kubelet/pods/bd4bde03-43c2-4be2-a95c-c3171e9c2dbb/volumes" Dec 11 08:44:17 crc kubenswrapper[4881]: I1211 08:44:17.426267 4881 generic.go:334] "Generic (PLEG): container finished" podID="9c312c92-31ea-4058-a68a-8c512b40ecd1" containerID="4ac8978a8e4c4f8a5cfd12cdaf4ae9daf1f35a19aad61efc1a4a52e6202a8e26" exitCode=0 Dec 11 08:44:17 crc kubenswrapper[4881]: I1211 08:44:17.426344 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-grrwm" event={"ID":"9c312c92-31ea-4058-a68a-8c512b40ecd1","Type":"ContainerDied","Data":"4ac8978a8e4c4f8a5cfd12cdaf4ae9daf1f35a19aad61efc1a4a52e6202a8e26"} Dec 11 08:44:17 crc kubenswrapper[4881]: I1211 08:44:17.426500 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-grrwm" event={"ID":"9c312c92-31ea-4058-a68a-8c512b40ecd1","Type":"ContainerDied","Data":"3105f314bc78be0f8b0abfc2ffcd01151b9427e62f734c5fe0b5f34b15b43040"} Dec 11 08:44:17 crc kubenswrapper[4881]: I1211 08:44:17.426511 4881 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="3105f314bc78be0f8b0abfc2ffcd01151b9427e62f734c5fe0b5f34b15b43040" Dec 11 08:44:17 crc kubenswrapper[4881]: I1211 08:44:17.482470 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-grrwm" Dec 11 08:44:17 crc kubenswrapper[4881]: I1211 08:44:17.614954 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hc2dd\" (UniqueName: \"kubernetes.io/projected/9c312c92-31ea-4058-a68a-8c512b40ecd1-kube-api-access-hc2dd\") pod \"9c312c92-31ea-4058-a68a-8c512b40ecd1\" (UID: \"9c312c92-31ea-4058-a68a-8c512b40ecd1\") " Dec 11 08:44:17 crc kubenswrapper[4881]: I1211 08:44:17.616103 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c312c92-31ea-4058-a68a-8c512b40ecd1-utilities\") pod \"9c312c92-31ea-4058-a68a-8c512b40ecd1\" (UID: \"9c312c92-31ea-4058-a68a-8c512b40ecd1\") " Dec 11 08:44:17 crc kubenswrapper[4881]: I1211 08:44:17.617479 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c312c92-31ea-4058-a68a-8c512b40ecd1-utilities" (OuterVolumeSpecName: "utilities") pod "9c312c92-31ea-4058-a68a-8c512b40ecd1" (UID: "9c312c92-31ea-4058-a68a-8c512b40ecd1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:44:17 crc kubenswrapper[4881]: I1211 08:44:17.617766 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c312c92-31ea-4058-a68a-8c512b40ecd1-catalog-content\") pod \"9c312c92-31ea-4058-a68a-8c512b40ecd1\" (UID: \"9c312c92-31ea-4058-a68a-8c512b40ecd1\") " Dec 11 08:44:17 crc kubenswrapper[4881]: I1211 08:44:17.619058 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c312c92-31ea-4058-a68a-8c512b40ecd1-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:17 crc kubenswrapper[4881]: I1211 08:44:17.621613 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c312c92-31ea-4058-a68a-8c512b40ecd1-kube-api-access-hc2dd" (OuterVolumeSpecName: "kube-api-access-hc2dd") pod "9c312c92-31ea-4058-a68a-8c512b40ecd1" (UID: "9c312c92-31ea-4058-a68a-8c512b40ecd1"). InnerVolumeSpecName "kube-api-access-hc2dd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:44:17 crc kubenswrapper[4881]: I1211 08:44:17.673712 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c312c92-31ea-4058-a68a-8c512b40ecd1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9c312c92-31ea-4058-a68a-8c512b40ecd1" (UID: "9c312c92-31ea-4058-a68a-8c512b40ecd1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:44:17 crc kubenswrapper[4881]: I1211 08:44:17.721747 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c312c92-31ea-4058-a68a-8c512b40ecd1-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:17 crc kubenswrapper[4881]: I1211 08:44:17.721788 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hc2dd\" (UniqueName: \"kubernetes.io/projected/9c312c92-31ea-4058-a68a-8c512b40ecd1-kube-api-access-hc2dd\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:18 crc kubenswrapper[4881]: I1211 08:44:18.439155 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-grrwm" Dec 11 08:44:18 crc kubenswrapper[4881]: I1211 08:44:18.479326 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-grrwm"] Dec 11 08:44:18 crc kubenswrapper[4881]: I1211 08:44:18.495476 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-grrwm"] Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.020039 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c312c92-31ea-4058-a68a-8c512b40ecd1" path="/var/lib/kubelet/pods/9c312c92-31ea-4058-a68a-8c512b40ecd1/volumes" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.686006 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn"] Dec 11 08:44:19 crc kubenswrapper[4881]: E1211 08:44:19.686792 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c312c92-31ea-4058-a68a-8c512b40ecd1" containerName="extract-content" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.686806 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c312c92-31ea-4058-a68a-8c512b40ecd1" containerName="extract-content" Dec 11 08:44:19 crc kubenswrapper[4881]: E1211 08:44:19.686816 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd4bde03-43c2-4be2-a95c-c3171e9c2dbb" containerName="dnsmasq-dns" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.686823 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd4bde03-43c2-4be2-a95c-c3171e9c2dbb" containerName="dnsmasq-dns" Dec 11 08:44:19 crc kubenswrapper[4881]: E1211 08:44:19.686839 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c312c92-31ea-4058-a68a-8c512b40ecd1" containerName="registry-server" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.686844 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c312c92-31ea-4058-a68a-8c512b40ecd1" containerName="registry-server" Dec 11 08:44:19 crc kubenswrapper[4881]: E1211 08:44:19.686854 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd4bde03-43c2-4be2-a95c-c3171e9c2dbb" containerName="init" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.686861 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd4bde03-43c2-4be2-a95c-c3171e9c2dbb" containerName="init" Dec 11 08:44:19 crc kubenswrapper[4881]: E1211 08:44:19.686876 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c312c92-31ea-4058-a68a-8c512b40ecd1" containerName="extract-utilities" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.686882 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c312c92-31ea-4058-a68a-8c512b40ecd1" containerName="extract-utilities" Dec 11 
08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.687118 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd4bde03-43c2-4be2-a95c-c3171e9c2dbb" containerName="dnsmasq-dns" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.687143 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c312c92-31ea-4058-a68a-8c512b40ecd1" containerName="registry-server" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.687984 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.690463 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.691681 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.691922 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.697313 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.708452 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn"] Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.876406 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nxnf\" (UniqueName: \"kubernetes.io/projected/cde02f48-eb61-4053-b321-3ab152bafeaa-kube-api-access-4nxnf\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn\" (UID: \"cde02f48-eb61-4053-b321-3ab152bafeaa\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.876492 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn\" (UID: \"cde02f48-eb61-4053-b321-3ab152bafeaa\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.876523 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn\" (UID: \"cde02f48-eb61-4053-b321-3ab152bafeaa\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.876833 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn\" (UID: \"cde02f48-eb61-4053-b321-3ab152bafeaa\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.978899 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn\" (UID: \"cde02f48-eb61-4053-b321-3ab152bafeaa\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.979018 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nxnf\" (UniqueName: \"kubernetes.io/projected/cde02f48-eb61-4053-b321-3ab152bafeaa-kube-api-access-4nxnf\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn\" (UID: \"cde02f48-eb61-4053-b321-3ab152bafeaa\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.979066 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn\" (UID: \"cde02f48-eb61-4053-b321-3ab152bafeaa\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.979089 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn\" (UID: \"cde02f48-eb61-4053-b321-3ab152bafeaa\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.986420 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn\" (UID: \"cde02f48-eb61-4053-b321-3ab152bafeaa\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.986739 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn\" (UID: \"cde02f48-eb61-4053-b321-3ab152bafeaa\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" Dec 11 08:44:19 crc kubenswrapper[4881]: I1211 08:44:19.987211 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn\" (UID: \"cde02f48-eb61-4053-b321-3ab152bafeaa\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" Dec 11 08:44:20 crc kubenswrapper[4881]: I1211 08:44:20.003329 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nxnf\" (UniqueName: \"kubernetes.io/projected/cde02f48-eb61-4053-b321-3ab152bafeaa-kube-api-access-4nxnf\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn\" (UID: \"cde02f48-eb61-4053-b321-3ab152bafeaa\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" Dec 11 08:44:20 crc kubenswrapper[4881]: I1211 08:44:20.008261 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" Dec 11 08:44:20 crc kubenswrapper[4881]: I1211 08:44:20.475289 4881 generic.go:334] "Generic (PLEG): container finished" podID="d52ebbc7-03f0-4f73-827b-8f8066e83146" containerID="6d3ddabb3dc74a0d0d8f900d4c5b0d1545dccbbb4986242ee6f13d27f36d6bb5" exitCode=0 Dec 11 08:44:20 crc kubenswrapper[4881]: I1211 08:44:20.475369 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d52ebbc7-03f0-4f73-827b-8f8066e83146","Type":"ContainerDied","Data":"6d3ddabb3dc74a0d0d8f900d4c5b0d1545dccbbb4986242ee6f13d27f36d6bb5"} Dec 11 08:44:20 crc kubenswrapper[4881]: I1211 08:44:20.478011 4881 generic.go:334] "Generic (PLEG): container finished" podID="acc39512-3a6a-4e4c-a2a2-a13ad13b11f0" containerID="d7472def6224df6184e1faf5d9a10ab83871f4aa957b91b569c5ac6d1d8306d2" exitCode=0 Dec 11 08:44:20 crc kubenswrapper[4881]: I1211 08:44:20.478048 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0","Type":"ContainerDied","Data":"d7472def6224df6184e1faf5d9a10ab83871f4aa957b91b569c5ac6d1d8306d2"} Dec 11 08:44:21 crc kubenswrapper[4881]: I1211 08:44:21.027435 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn"] Dec 11 08:44:21 crc kubenswrapper[4881]: I1211 08:44:21.495535 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"acc39512-3a6a-4e4c-a2a2-a13ad13b11f0","Type":"ContainerStarted","Data":"2eacd40543c332b1c44d640f43856a189cc30cb1fb9f414c6c994b73e18b4fcb"} Dec 11 08:44:21 crc kubenswrapper[4881]: I1211 08:44:21.495834 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Dec 11 08:44:21 crc kubenswrapper[4881]: I1211 08:44:21.501648 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d52ebbc7-03f0-4f73-827b-8f8066e83146","Type":"ContainerStarted","Data":"f1daf1c354aa8a3b9b4ac7a12083e21d6150b082bb6609151de0a1bc6e693aff"} Dec 11 08:44:21 crc kubenswrapper[4881]: I1211 08:44:21.501929 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Dec 11 08:44:21 crc kubenswrapper[4881]: I1211 08:44:21.535252 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=56.535207377 podStartE2EDuration="56.535207377s" podCreationTimestamp="2025-12-11 08:43:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:44:21.520013212 +0000 UTC m=+1709.897381939" watchObservedRunningTime="2025-12-11 08:44:21.535207377 +0000 UTC m=+1709.912576074" Dec 11 08:44:21 crc kubenswrapper[4881]: I1211 08:44:21.544583 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=56.544567648 podStartE2EDuration="56.544567648s" podCreationTimestamp="2025-12-11 08:43:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:44:21.540886657 +0000 UTC m=+1709.918255354" watchObservedRunningTime="2025-12-11 08:44:21.544567648 +0000 UTC m=+1709.921936345" Dec 11 08:44:22 crc kubenswrapper[4881]: I1211 08:44:22.515473 4881 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" event={"ID":"cde02f48-eb61-4053-b321-3ab152bafeaa","Type":"ContainerStarted","Data":"60f077cd145b6cf5237e287313dd9da78a794d2bd2052b1c75a190599fd52a34"} Dec 11 08:44:23 crc kubenswrapper[4881]: I1211 08:44:23.997160 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-77bc5fff7b-clttx" Dec 11 08:44:24 crc kubenswrapper[4881]: I1211 08:44:24.005092 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9" Dec 11 08:44:24 crc kubenswrapper[4881]: E1211 08:44:24.005374 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:44:24 crc kubenswrapper[4881]: I1211 08:44:24.153043 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-b5858866f-srtbn"] Dec 11 08:44:24 crc kubenswrapper[4881]: I1211 08:44:24.153260 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-b5858866f-srtbn" podUID="a466820d-9c8f-4aa5-8e39-485b6212a154" containerName="heat-api" containerID="cri-o://83689b425d023b54b62be213c3d9fa1dd9c5fc9184664aa22970bbd9dd3b05c0" gracePeriod=60 Dec 11 08:44:24 crc kubenswrapper[4881]: I1211 08:44:24.634916 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-5f8f5d98d5-xmfmx" Dec 11 08:44:24 crc kubenswrapper[4881]: I1211 08:44:24.709768 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-579694df8d-vpn5m"] Dec 11 08:44:24 crc kubenswrapper[4881]: I1211 08:44:24.709998 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-579694df8d-vpn5m" podUID="8bc9eb50-582d-468f-8286-f9a0d3c1def4" containerName="heat-cfnapi" containerID="cri-o://901ce1a433f29c0b30a0d9382e385ecab56a34197594778ae7cfdc2e92ed9955" gracePeriod=60 Dec 11 08:44:27 crc kubenswrapper[4881]: I1211 08:44:27.581468 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-b5858866f-srtbn" podUID="a466820d-9c8f-4aa5-8e39-485b6212a154" containerName="heat-api" probeResult="failure" output="Get \"https://10.217.0.212:8004/healthcheck\": read tcp 10.217.0.2:38372->10.217.0.212:8004: read: connection reset by peer" Dec 11 08:44:27 crc kubenswrapper[4881]: I1211 08:44:27.868379 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-579694df8d-vpn5m" podUID="8bc9eb50-582d-468f-8286-f9a0d3c1def4" containerName="heat-cfnapi" probeResult="failure" output="Get \"https://10.217.0.211:8000/healthcheck\": read tcp 10.217.0.2:44996->10.217.0.211:8000: read: connection reset by peer" Dec 11 08:44:28 crc kubenswrapper[4881]: I1211 08:44:28.455566 4881 patch_prober.go:28] interesting pod/route-controller-manager-95479fb4c-24hkl container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.71:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 11 
08:44:28 crc kubenswrapper[4881]: I1211 08:44:28.455961 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-95479fb4c-24hkl" podUID="f318c6eb-08d3-4d22-9f89-d57404e0a8ad" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.71:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.410435 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-579694df8d-vpn5m" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.551243 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-public-tls-certs\") pod \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.551740 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-config-data\") pod \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.551921 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-config-data-custom\") pod \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.551964 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xv2xp\" (UniqueName: \"kubernetes.io/projected/8bc9eb50-582d-468f-8286-f9a0d3c1def4-kube-api-access-xv2xp\") pod \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.552015 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-internal-tls-certs\") pod \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.552045 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-combined-ca-bundle\") pod \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\" (UID: \"8bc9eb50-582d-468f-8286-f9a0d3c1def4\") " Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.559642 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "8bc9eb50-582d-468f-8286-f9a0d3c1def4" (UID: "8bc9eb50-582d-468f-8286-f9a0d3c1def4"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.563982 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bc9eb50-582d-468f-8286-f9a0d3c1def4-kube-api-access-xv2xp" (OuterVolumeSpecName: "kube-api-access-xv2xp") pod "8bc9eb50-582d-468f-8286-f9a0d3c1def4" (UID: "8bc9eb50-582d-468f-8286-f9a0d3c1def4"). InnerVolumeSpecName "kube-api-access-xv2xp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.580286 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-b5858866f-srtbn" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.592802 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8bc9eb50-582d-468f-8286-f9a0d3c1def4" (UID: "8bc9eb50-582d-468f-8286-f9a0d3c1def4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.652864 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-config-data" (OuterVolumeSpecName: "config-data") pod "8bc9eb50-582d-468f-8286-f9a0d3c1def4" (UID: "8bc9eb50-582d-468f-8286-f9a0d3c1def4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.652968 4881 generic.go:334] "Generic (PLEG): container finished" podID="8bc9eb50-582d-468f-8286-f9a0d3c1def4" containerID="901ce1a433f29c0b30a0d9382e385ecab56a34197594778ae7cfdc2e92ed9955" exitCode=0 Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.653115 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-579694df8d-vpn5m" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.654046 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-579694df8d-vpn5m" event={"ID":"8bc9eb50-582d-468f-8286-f9a0d3c1def4","Type":"ContainerDied","Data":"901ce1a433f29c0b30a0d9382e385ecab56a34197594778ae7cfdc2e92ed9955"} Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.654087 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-579694df8d-vpn5m" event={"ID":"8bc9eb50-582d-468f-8286-f9a0d3c1def4","Type":"ContainerDied","Data":"c2a925a563afd8b45732a44ef64c5c31aac3671db53e2c458d5f1dea0e099ec5"} Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.654110 4881 scope.go:117] "RemoveContainer" containerID="901ce1a433f29c0b30a0d9382e385ecab56a34197594778ae7cfdc2e92ed9955" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.654905 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-internal-tls-certs\") pod \"a466820d-9c8f-4aa5-8e39-485b6212a154\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.654999 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-public-tls-certs\") pod \"a466820d-9c8f-4aa5-8e39-485b6212a154\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.655132 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-config-data\") pod \"a466820d-9c8f-4aa5-8e39-485b6212a154\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.656443 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-combined-ca-bundle\") pod \"a466820d-9c8f-4aa5-8e39-485b6212a154\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.656595 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6gwhn\" (UniqueName: \"kubernetes.io/projected/a466820d-9c8f-4aa5-8e39-485b6212a154-kube-api-access-6gwhn\") pod \"a466820d-9c8f-4aa5-8e39-485b6212a154\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.656645 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-config-data-custom\") pod \"a466820d-9c8f-4aa5-8e39-485b6212a154\" (UID: \"a466820d-9c8f-4aa5-8e39-485b6212a154\") " Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.658207 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.658236 4881 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 
11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.658253 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xv2xp\" (UniqueName: \"kubernetes.io/projected/8bc9eb50-582d-468f-8286-f9a0d3c1def4-kube-api-access-xv2xp\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.658267 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.659457 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "8bc9eb50-582d-468f-8286-f9a0d3c1def4" (UID: "8bc9eb50-582d-468f-8286-f9a0d3c1def4"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.662449 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a466820d-9c8f-4aa5-8e39-485b6212a154" (UID: "a466820d-9c8f-4aa5-8e39-485b6212a154"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.663259 4881 generic.go:334] "Generic (PLEG): container finished" podID="a466820d-9c8f-4aa5-8e39-485b6212a154" containerID="83689b425d023b54b62be213c3d9fa1dd9c5fc9184664aa22970bbd9dd3b05c0" exitCode=0 Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.663276 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a466820d-9c8f-4aa5-8e39-485b6212a154-kube-api-access-6gwhn" (OuterVolumeSpecName: "kube-api-access-6gwhn") pod "a466820d-9c8f-4aa5-8e39-485b6212a154" (UID: "a466820d-9c8f-4aa5-8e39-485b6212a154"). InnerVolumeSpecName "kube-api-access-6gwhn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.663302 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-b5858866f-srtbn" event={"ID":"a466820d-9c8f-4aa5-8e39-485b6212a154","Type":"ContainerDied","Data":"83689b425d023b54b62be213c3d9fa1dd9c5fc9184664aa22970bbd9dd3b05c0"} Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.663437 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-b5858866f-srtbn" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.663455 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-b5858866f-srtbn" event={"ID":"a466820d-9c8f-4aa5-8e39-485b6212a154","Type":"ContainerDied","Data":"8916d11191fdd602bde7332b6da597dddfe517a05db2f9e9152321b82a08085b"} Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.692507 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "8bc9eb50-582d-468f-8286-f9a0d3c1def4" (UID: "8bc9eb50-582d-468f-8286-f9a0d3c1def4"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.704004 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a466820d-9c8f-4aa5-8e39-485b6212a154" (UID: "a466820d-9c8f-4aa5-8e39-485b6212a154"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.711519 4881 scope.go:117] "RemoveContainer" containerID="901ce1a433f29c0b30a0d9382e385ecab56a34197594778ae7cfdc2e92ed9955" Dec 11 08:44:29 crc kubenswrapper[4881]: E1211 08:44:29.711967 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"901ce1a433f29c0b30a0d9382e385ecab56a34197594778ae7cfdc2e92ed9955\": container with ID starting with 901ce1a433f29c0b30a0d9382e385ecab56a34197594778ae7cfdc2e92ed9955 not found: ID does not exist" containerID="901ce1a433f29c0b30a0d9382e385ecab56a34197594778ae7cfdc2e92ed9955" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.712004 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"901ce1a433f29c0b30a0d9382e385ecab56a34197594778ae7cfdc2e92ed9955"} err="failed to get container status \"901ce1a433f29c0b30a0d9382e385ecab56a34197594778ae7cfdc2e92ed9955\": rpc error: code = NotFound desc = could not find container \"901ce1a433f29c0b30a0d9382e385ecab56a34197594778ae7cfdc2e92ed9955\": container with ID starting with 901ce1a433f29c0b30a0d9382e385ecab56a34197594778ae7cfdc2e92ed9955 not found: ID does not exist" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.712031 4881 scope.go:117] "RemoveContainer" containerID="83689b425d023b54b62be213c3d9fa1dd9c5fc9184664aa22970bbd9dd3b05c0" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.726825 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "a466820d-9c8f-4aa5-8e39-485b6212a154" (UID: "a466820d-9c8f-4aa5-8e39-485b6212a154"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.754697 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-config-data" (OuterVolumeSpecName: "config-data") pod "a466820d-9c8f-4aa5-8e39-485b6212a154" (UID: "a466820d-9c8f-4aa5-8e39-485b6212a154"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.756126 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "a466820d-9c8f-4aa5-8e39-485b6212a154" (UID: "a466820d-9c8f-4aa5-8e39-485b6212a154"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.761317 4881 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-public-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.761371 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.761387 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6gwhn\" (UniqueName: \"kubernetes.io/projected/a466820d-9c8f-4aa5-8e39-485b6212a154-kube-api-access-6gwhn\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.761403 4881 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.761415 4881 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.761427 4881 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bc9eb50-582d-468f-8286-f9a0d3c1def4-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.761439 4881 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-public-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.761457 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a466820d-9c8f-4aa5-8e39-485b6212a154-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.786101 4881 scope.go:117] "RemoveContainer" containerID="83689b425d023b54b62be213c3d9fa1dd9c5fc9184664aa22970bbd9dd3b05c0" Dec 11 08:44:29 crc kubenswrapper[4881]: E1211 08:44:29.786525 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83689b425d023b54b62be213c3d9fa1dd9c5fc9184664aa22970bbd9dd3b05c0\": container with ID starting with 83689b425d023b54b62be213c3d9fa1dd9c5fc9184664aa22970bbd9dd3b05c0 not found: ID does not exist" containerID="83689b425d023b54b62be213c3d9fa1dd9c5fc9184664aa22970bbd9dd3b05c0" Dec 11 08:44:29 crc kubenswrapper[4881]: I1211 08:44:29.786553 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83689b425d023b54b62be213c3d9fa1dd9c5fc9184664aa22970bbd9dd3b05c0"} err="failed to get container status \"83689b425d023b54b62be213c3d9fa1dd9c5fc9184664aa22970bbd9dd3b05c0\": rpc error: code = NotFound desc = could not find container \"83689b425d023b54b62be213c3d9fa1dd9c5fc9184664aa22970bbd9dd3b05c0\": container with ID starting with 83689b425d023b54b62be213c3d9fa1dd9c5fc9184664aa22970bbd9dd3b05c0 not found: ID does not exist" Dec 11 08:44:30 crc kubenswrapper[4881]: I1211 08:44:30.053417 4881 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-579694df8d-vpn5m"] Dec 11 08:44:30 crc kubenswrapper[4881]: I1211 08:44:30.074753 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-579694df8d-vpn5m"] Dec 11 08:44:30 crc kubenswrapper[4881]: I1211 08:44:30.094925 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-b5858866f-srtbn"] Dec 11 08:44:30 crc kubenswrapper[4881]: I1211 08:44:30.108866 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-b5858866f-srtbn"] Dec 11 08:44:30 crc kubenswrapper[4881]: I1211 08:44:30.652074 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-559447f984-4bfr5" Dec 11 08:44:30 crc kubenswrapper[4881]: I1211 08:44:30.729555 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-d56bd9469-fm4xb"] Dec 11 08:44:30 crc kubenswrapper[4881]: I1211 08:44:30.729829 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-d56bd9469-fm4xb" podUID="8f8f4fc0-e759-433b-835a-f2c0db79850f" containerName="heat-engine" containerID="cri-o://9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa" gracePeriod=60 Dec 11 08:44:31 crc kubenswrapper[4881]: I1211 08:44:31.024703 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bc9eb50-582d-468f-8286-f9a0d3c1def4" path="/var/lib/kubelet/pods/8bc9eb50-582d-468f-8286-f9a0d3c1def4/volumes" Dec 11 08:44:31 crc kubenswrapper[4881]: I1211 08:44:31.025498 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a466820d-9c8f-4aa5-8e39-485b6212a154" path="/var/lib/kubelet/pods/a466820d-9c8f-4aa5-8e39-485b6212a154/volumes" Dec 11 08:44:33 crc kubenswrapper[4881]: E1211 08:44:33.714811 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 11 08:44:33 crc kubenswrapper[4881]: E1211 08:44:33.716754 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 11 08:44:33 crc kubenswrapper[4881]: E1211 08:44:33.718486 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 11 08:44:33 crc kubenswrapper[4881]: E1211 08:44:33.718531 4881 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-d56bd9469-fm4xb" podUID="8f8f4fc0-e759-433b-835a-f2c0db79850f" containerName="heat-engine" Dec 11 08:44:34 crc kubenswrapper[4881]: I1211 08:44:34.259549 4881 patch_prober.go:28] interesting pod/metrics-server-567ff9c44d-zvlcx container/metrics-server namespace/openshift-monitoring: Liveness probe status=failure output="Get 
\"https://10.217.0.78:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 11 08:44:34 crc kubenswrapper[4881]: I1211 08:44:34.259634 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/metrics-server-567ff9c44d-zvlcx" podUID="e33cf336-f1a0-4217-a606-c12ebf877533" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.78:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 08:44:35 crc kubenswrapper[4881]: I1211 08:44:35.763039 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="acc39512-3a6a-4e4c-a2a2-a13ad13b11f0" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.1.6:5671: connect: connection refused" Dec 11 08:44:35 crc kubenswrapper[4881]: I1211 08:44:35.778738 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="d52ebbc7-03f0-4f73-827b-8f8066e83146" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.1.7:5671: connect: connection refused" Dec 11 08:44:39 crc kubenswrapper[4881]: I1211 08:44:39.012074 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9" Dec 11 08:44:39 crc kubenswrapper[4881]: E1211 08:44:39.012949 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:44:39 crc kubenswrapper[4881]: I1211 08:44:39.787960 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" event={"ID":"cde02f48-eb61-4053-b321-3ab152bafeaa","Type":"ContainerStarted","Data":"2350f687aa602ba453abd334d6e2b3be914e9c1d6715005bdd2aecdd5abb8a09"} Dec 11 08:44:39 crc kubenswrapper[4881]: I1211 08:44:39.809178 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" podStartSLOduration=3.59572002 podStartE2EDuration="20.809163218s" podCreationTimestamp="2025-12-11 08:44:19 +0000 UTC" firstStartedPulling="2025-12-11 08:44:21.77438931 +0000 UTC m=+1710.151758007" lastFinishedPulling="2025-12-11 08:44:38.987832508 +0000 UTC m=+1727.365201205" observedRunningTime="2025-12-11 08:44:39.807427366 +0000 UTC m=+1728.184796063" watchObservedRunningTime="2025-12-11 08:44:39.809163218 +0000 UTC m=+1728.186531915" Dec 11 08:44:41 crc kubenswrapper[4881]: I1211 08:44:41.222456 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.103115 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-tt4br"] Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.120573 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-tt4br"] Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.214259 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-mn4r5"] Dec 11 08:44:42 crc kubenswrapper[4881]: E1211 08:44:42.214889 4881 cpu_manager.go:410] "RemoveStaleState: 
Dec 11 08:44:42 crc kubenswrapper[4881]: E1211 08:44:42.214889 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a466820d-9c8f-4aa5-8e39-485b6212a154" containerName="heat-api"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.214916 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="a466820d-9c8f-4aa5-8e39-485b6212a154" containerName="heat-api"
Dec 11 08:44:42 crc kubenswrapper[4881]: E1211 08:44:42.214949 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bc9eb50-582d-468f-8286-f9a0d3c1def4" containerName="heat-cfnapi"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.214959 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bc9eb50-582d-468f-8286-f9a0d3c1def4" containerName="heat-cfnapi"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.215263 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="a466820d-9c8f-4aa5-8e39-485b6212a154" containerName="heat-api"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.215313 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bc9eb50-582d-468f-8286-f9a0d3c1def4" containerName="heat-cfnapi"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.216348 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-mn4r5"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.226788 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-mn4r5"]
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.370420 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzzd6\" (UniqueName: \"kubernetes.io/projected/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-kube-api-access-vzzd6\") pod \"aodh-db-sync-mn4r5\" (UID: \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\") " pod="openstack/aodh-db-sync-mn4r5"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.370683 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-config-data\") pod \"aodh-db-sync-mn4r5\" (UID: \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\") " pod="openstack/aodh-db-sync-mn4r5"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.370832 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-scripts\") pod \"aodh-db-sync-mn4r5\" (UID: \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\") " pod="openstack/aodh-db-sync-mn4r5"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.371364 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-combined-ca-bundle\") pod \"aodh-db-sync-mn4r5\" (UID: \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\") " pod="openstack/aodh-db-sync-mn4r5"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.473870 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-combined-ca-bundle\") pod \"aodh-db-sync-mn4r5\" (UID: \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\") " pod="openstack/aodh-db-sync-mn4r5"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.473958 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzzd6\" (UniqueName: \"kubernetes.io/projected/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-kube-api-access-vzzd6\") pod \"aodh-db-sync-mn4r5\" (UID: \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\") " pod="openstack/aodh-db-sync-mn4r5"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.474049 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-config-data\") pod \"aodh-db-sync-mn4r5\" (UID: \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\") " pod="openstack/aodh-db-sync-mn4r5"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.474111 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-scripts\") pod \"aodh-db-sync-mn4r5\" (UID: \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\") " pod="openstack/aodh-db-sync-mn4r5"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.487429 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-config-data\") pod \"aodh-db-sync-mn4r5\" (UID: \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\") " pod="openstack/aodh-db-sync-mn4r5"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.488944 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-combined-ca-bundle\") pod \"aodh-db-sync-mn4r5\" (UID: \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\") " pod="openstack/aodh-db-sync-mn4r5"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.491041 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-scripts\") pod \"aodh-db-sync-mn4r5\" (UID: \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\") " pod="openstack/aodh-db-sync-mn4r5"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.518028 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vzzd6\" (UniqueName: \"kubernetes.io/projected/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-kube-api-access-vzzd6\") pod \"aodh-db-sync-mn4r5\" (UID: \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\") " pod="openstack/aodh-db-sync-mn4r5"
Dec 11 08:44:42 crc kubenswrapper[4881]: I1211 08:44:42.533951 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-mn4r5"
Dec 11 08:44:43 crc kubenswrapper[4881]: I1211 08:44:43.020603 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67c4b27a-5352-4006-9944-6de8dc05d3d1" path="/var/lib/kubelet/pods/67c4b27a-5352-4006-9944-6de8dc05d3d1/volumes"
Dec 11 08:44:43 crc kubenswrapper[4881]: E1211 08:44:43.715871 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Dec 11 08:44:43 crc kubenswrapper[4881]: E1211 08:44:43.718625 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Dec 11 08:44:43 crc kubenswrapper[4881]: E1211 08:44:43.720998 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Dec 11 08:44:43 crc kubenswrapper[4881]: E1211 08:44:43.721094 4881 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-d56bd9469-fm4xb" podUID="8f8f4fc0-e759-433b-835a-f2c0db79850f" containerName="heat-engine"
Dec 11 08:44:43 crc kubenswrapper[4881]: I1211 08:44:43.732157 4881 scope.go:117] "RemoveContainer" containerID="1caf79a505830b54c1a9c58bcd9a0322448f88ad809e3ede5d5aefad2293f254"
Dec 11 08:44:46 crc kubenswrapper[4881]: I1211 08:44:46.826464 4881 patch_prober.go:28] interesting pod/oauth-openshift-846dc6fc5d-rv7gt container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.62:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 11 08:44:46 crc kubenswrapper[4881]: I1211 08:44:46.827095 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-846dc6fc5d-rv7gt" podUID="c3868292-0936-4979-bd1f-c9406decb7a8" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.62:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Dec 11 08:44:50 crc kubenswrapper[4881]: E1211 08:44:50.419306 4881 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="7.414s"
Dec 11 08:44:50 crc kubenswrapper[4881]: I1211 08:44:50.419977 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Dec 11 08:44:50 crc kubenswrapper[4881]: I1211 08:44:50.420011 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Dec 11 08:44:50 crc kubenswrapper[4881]: I1211 08:44:50.648120 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-mn4r5"]
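
The ExecSync failures above are a readiness exec probe: the kubelet runs /usr/bin/pgrep -r DRST heat-engine inside the container, where -r filters for run states D/R/S/T (procps-ng pgrep). Because the container is already stopping, the runtime cannot register the exec, so the probe is recorded as "errored" rather than as a clean failure. A local sketch of the same check and its exit-code mapping, assuming a Linux host with procps-ng installed:

    package main

    import (
        "fmt"
        "os/exec"
    )

    // Run the command the kubelet was exec'ing into the heat-engine
    // container. pgrep exits 0 when a process matched (probe success)
    // and 1 when nothing matched (probe failure).
    func main() {
        cmd := exec.Command("/usr/bin/pgrep", "-r", "DRST", "heat-engine")
        out, err := cmd.CombinedOutput()
        if err == nil {
            fmt.Printf("probe success, pids:\n%s", out)
            return
        }
        if exitErr, ok := err.(*exec.ExitError); ok {
            fmt.Printf("probe failure, exit code %d\n", exitErr.ExitCode())
            return
        }
        // Could not run the command at all: analogous to the "Probe errored"
        // lines above, where the exec could not even be registered.
        fmt.Println("probe error:", err)
    }
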
Dec 11 08:44:53 crc kubenswrapper[4881]: E1211 08:44:53.715132 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Dec 11 08:44:53 crc kubenswrapper[4881]: E1211 08:44:53.716620 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Dec 11 08:44:53 crc kubenswrapper[4881]: E1211 08:44:53.717648 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Dec 11 08:44:53 crc kubenswrapper[4881]: E1211 08:44:53.717686 4881 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-d56bd9469-fm4xb" podUID="8f8f4fc0-e759-433b-835a-f2c0db79850f" containerName="heat-engine"
Dec 11 08:44:54 crc kubenswrapper[4881]: I1211 08:44:54.008241 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9"
Dec 11 08:44:54 crc kubenswrapper[4881]: E1211 08:44:54.008557 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:44:57 crc kubenswrapper[4881]: I1211 08:44:57.014682 4881 generic.go:334] "Generic (PLEG): container finished" podID="cde02f48-eb61-4053-b321-3ab152bafeaa" containerID="2350f687aa602ba453abd334d6e2b3be914e9c1d6715005bdd2aecdd5abb8a09" exitCode=0
Dec 11 08:44:57 crc kubenswrapper[4881]: I1211 08:44:57.026657 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" event={"ID":"cde02f48-eb61-4053-b321-3ab152bafeaa","Type":"ContainerDied","Data":"2350f687aa602ba453abd334d6e2b3be914e9c1d6715005bdd2aecdd5abb8a09"}
Dec 11 08:44:59 crc kubenswrapper[4881]: I1211 08:44:59.043509 4881 generic.go:334] "Generic (PLEG): container finished" podID="8f8f4fc0-e759-433b-835a-f2c0db79850f" containerID="9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa" exitCode=0
Dec 11 08:44:59 crc kubenswrapper[4881]: I1211 08:44:59.159509 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-d56bd9469-fm4xb" event={"ID":"8f8f4fc0-e759-433b-835a-f2c0db79850f","Type":"ContainerDied","Data":"9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa"}
Dec 11 08:45:02 crc kubenswrapper[4881]: I1211 08:45:02.228613 4881 patch_prober.go:28] interesting pod/perses-operator-5446b9c989-clg8t container/perses-operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.12:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
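
machine-config-daemon keeps being skipped with "back-off 5m0s restarting failed container": the kubelet's restart delay doubles on each crash until it reaches a ceiling. A sketch of that ladder; the 10s initial delay and 5m cap are the commonly documented kubelet defaults and should be treated as an assumption here, since the log itself only shows that this container has reached the 5m0s ceiling:

    package main

    import (
        "fmt"
        "time"
    )

    // Doubling restart backoff with a cap, the shape behind CrashLoopBackOff.
    func main() {
        delay := 10 * time.Second
        const maxDelay = 5 * time.Minute
        for restart := 1; restart <= 7; restart++ {
            fmt.Printf("restart %d: back-off %s\n", restart, delay)
            delay *= 2
            if delay > maxDelay {
                delay = maxDelay
            }
        }
    }
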
output="Get \"http://10.217.0.12:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 11 08:45:02 crc kubenswrapper[4881]: I1211 08:45:02.229320 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/perses-operator-5446b9c989-clg8t" podUID="8df2f7b3-931a-4e09-b473-f71d8ee210d8" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.12:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 11 08:45:03 crc kubenswrapper[4881]: I1211 08:45:03.279100 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7"] Dec 11 08:45:03 crc kubenswrapper[4881]: I1211 08:45:03.281152 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7" Dec 11 08:45:03 crc kubenswrapper[4881]: I1211 08:45:03.283939 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 11 08:45:03 crc kubenswrapper[4881]: I1211 08:45:03.284469 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 11 08:45:03 crc kubenswrapper[4881]: I1211 08:45:03.308393 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7"] Dec 11 08:45:03 crc kubenswrapper[4881]: I1211 08:45:03.435004 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-secret-volume\") pod \"collect-profiles-29424045-r9ck7\" (UID: \"79fedc84-0e64-4f6f-82a1-ce37cf2e9304\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7" Dec 11 08:45:03 crc kubenswrapper[4881]: I1211 08:45:03.435053 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g48r4\" (UniqueName: \"kubernetes.io/projected/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-kube-api-access-g48r4\") pod \"collect-profiles-29424045-r9ck7\" (UID: \"79fedc84-0e64-4f6f-82a1-ce37cf2e9304\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7" Dec 11 08:45:03 crc kubenswrapper[4881]: I1211 08:45:03.435075 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-config-volume\") pod \"collect-profiles-29424045-r9ck7\" (UID: \"79fedc84-0e64-4f6f-82a1-ce37cf2e9304\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7" Dec 11 08:45:03 crc kubenswrapper[4881]: I1211 08:45:03.537113 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-secret-volume\") pod \"collect-profiles-29424045-r9ck7\" (UID: \"79fedc84-0e64-4f6f-82a1-ce37cf2e9304\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7" Dec 11 08:45:03 crc kubenswrapper[4881]: I1211 08:45:03.537484 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g48r4\" (UniqueName: 
\"kubernetes.io/projected/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-kube-api-access-g48r4\") pod \"collect-profiles-29424045-r9ck7\" (UID: \"79fedc84-0e64-4f6f-82a1-ce37cf2e9304\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7" Dec 11 08:45:03 crc kubenswrapper[4881]: I1211 08:45:03.537600 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-config-volume\") pod \"collect-profiles-29424045-r9ck7\" (UID: \"79fedc84-0e64-4f6f-82a1-ce37cf2e9304\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7" Dec 11 08:45:03 crc kubenswrapper[4881]: I1211 08:45:03.538817 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-config-volume\") pod \"collect-profiles-29424045-r9ck7\" (UID: \"79fedc84-0e64-4f6f-82a1-ce37cf2e9304\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7" Dec 11 08:45:03 crc kubenswrapper[4881]: I1211 08:45:03.550355 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-secret-volume\") pod \"collect-profiles-29424045-r9ck7\" (UID: \"79fedc84-0e64-4f6f-82a1-ce37cf2e9304\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7" Dec 11 08:45:03 crc kubenswrapper[4881]: I1211 08:45:03.558193 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g48r4\" (UniqueName: \"kubernetes.io/projected/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-kube-api-access-g48r4\") pod \"collect-profiles-29424045-r9ck7\" (UID: \"79fedc84-0e64-4f6f-82a1-ce37cf2e9304\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7" Dec 11 08:45:03 crc kubenswrapper[4881]: I1211 08:45:03.619094 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7" Dec 11 08:45:03 crc kubenswrapper[4881]: E1211 08:45:03.714238 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa is running failed: container process not found" containerID="9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 11 08:45:03 crc kubenswrapper[4881]: E1211 08:45:03.714900 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa is running failed: container process not found" containerID="9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 11 08:45:03 crc kubenswrapper[4881]: E1211 08:45:03.715388 4881 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa is running failed: container process not found" containerID="9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 11 08:45:03 crc kubenswrapper[4881]: E1211 08:45:03.715432 4881 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa is running failed: container process not found" probeType="Readiness" pod="openstack/heat-engine-d56bd9469-fm4xb" podUID="8f8f4fc0-e759-433b-835a-f2c0db79850f" containerName="heat-engine" Dec 11 08:45:05 crc kubenswrapper[4881]: I1211 08:45:05.711270 4881 scope.go:117] "RemoveContainer" containerID="fad51349caab58607ba58815c5f2340b3cd292421c0d6836f33b9f343fcadef1" Dec 11 08:45:05 crc kubenswrapper[4881]: I1211 08:45:05.923974 4881 util.go:48] "No ready sandbox for pod can be found. 
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.008878 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-repo-setup-combined-ca-bundle\") pod \"cde02f48-eb61-4053-b321-3ab152bafeaa\" (UID: \"cde02f48-eb61-4053-b321-3ab152bafeaa\") "
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.008985 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-inventory\") pod \"cde02f48-eb61-4053-b321-3ab152bafeaa\" (UID: \"cde02f48-eb61-4053-b321-3ab152bafeaa\") "
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.009099 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4nxnf\" (UniqueName: \"kubernetes.io/projected/cde02f48-eb61-4053-b321-3ab152bafeaa-kube-api-access-4nxnf\") pod \"cde02f48-eb61-4053-b321-3ab152bafeaa\" (UID: \"cde02f48-eb61-4053-b321-3ab152bafeaa\") "
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.009134 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-ssh-key\") pod \"cde02f48-eb61-4053-b321-3ab152bafeaa\" (UID: \"cde02f48-eb61-4053-b321-3ab152bafeaa\") "
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.015437 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cde02f48-eb61-4053-b321-3ab152bafeaa-kube-api-access-4nxnf" (OuterVolumeSpecName: "kube-api-access-4nxnf") pod "cde02f48-eb61-4053-b321-3ab152bafeaa" (UID: "cde02f48-eb61-4053-b321-3ab152bafeaa"). InnerVolumeSpecName "kube-api-access-4nxnf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.036765 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "cde02f48-eb61-4053-b321-3ab152bafeaa" (UID: "cde02f48-eb61-4053-b321-3ab152bafeaa"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.112863 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4nxnf\" (UniqueName: \"kubernetes.io/projected/cde02f48-eb61-4053-b321-3ab152bafeaa-kube-api-access-4nxnf\") on node \"crc\" DevicePath \"\""
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.114037 4881 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.127034 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn" event={"ID":"cde02f48-eb61-4053-b321-3ab152bafeaa","Type":"ContainerDied","Data":"60f077cd145b6cf5237e287313dd9da78a794d2bd2052b1c75a190599fd52a34"}
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.127491 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="60f077cd145b6cf5237e287313dd9da78a794d2bd2052b1c75a190599fd52a34"
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.127051 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn"
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.128953 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-mn4r5" event={"ID":"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05","Type":"ContainerStarted","Data":"092c2b3388bd39e3964463faf140e9bf9f5c3433efe10f39c513ebb05264b02c"}
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.155657 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cde02f48-eb61-4053-b321-3ab152bafeaa" (UID: "cde02f48-eb61-4053-b321-3ab152bafeaa"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.168960 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-inventory" (OuterVolumeSpecName: "inventory") pod "cde02f48-eb61-4053-b321-3ab152bafeaa" (UID: "cde02f48-eb61-4053-b321-3ab152bafeaa"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.188888 4881 scope.go:117] "RemoveContainer" containerID="040473c11cc342fb095cdc5e6d4af3dd423c70da54ff938477dab4b9b33715a3"
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.216365 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-inventory\") on node \"crc\" DevicePath \"\""
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.216400 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cde02f48-eb61-4053-b321-3ab152bafeaa-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.223973 4881 scope.go:117] "RemoveContainer" containerID="bc26391021fb324f31517bbc60d528d2c90eb437b191ea229c4394e736cb60ba"
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.286510 4881 scope.go:117] "RemoveContainer" containerID="d01bab017020799c07f038c20a04ac7e7800ea0cb58b7a1e1a1d0d58303d7d10"
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.317465 4881 scope.go:117] "RemoveContainer" containerID="9c1e8aa9aea1cf14c00df462fc461900deea0c524c22d4f6fe990e20562dd413"
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.563325 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7"]
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.697562 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-d56bd9469-fm4xb"
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.729246 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-config-data-custom\") pod \"8f8f4fc0-e759-433b-835a-f2c0db79850f\" (UID: \"8f8f4fc0-e759-433b-835a-f2c0db79850f\") "
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.729328 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-combined-ca-bundle\") pod \"8f8f4fc0-e759-433b-835a-f2c0db79850f\" (UID: \"8f8f4fc0-e759-433b-835a-f2c0db79850f\") "
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.729727 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9vp7\" (UniqueName: \"kubernetes.io/projected/8f8f4fc0-e759-433b-835a-f2c0db79850f-kube-api-access-r9vp7\") pod \"8f8f4fc0-e759-433b-835a-f2c0db79850f\" (UID: \"8f8f4fc0-e759-433b-835a-f2c0db79850f\") "
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.729781 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-config-data\") pod \"8f8f4fc0-e759-433b-835a-f2c0db79850f\" (UID: \"8f8f4fc0-e759-433b-835a-f2c0db79850f\") "
Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.735456 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "8f8f4fc0-e759-433b-835a-f2c0db79850f" (UID: "8f8f4fc0-e759-433b-835a-f2c0db79850f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.736128 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f8f4fc0-e759-433b-835a-f2c0db79850f-kube-api-access-r9vp7" (OuterVolumeSpecName: "kube-api-access-r9vp7") pod "8f8f4fc0-e759-433b-835a-f2c0db79850f" (UID: "8f8f4fc0-e759-433b-835a-f2c0db79850f"). InnerVolumeSpecName "kube-api-access-r9vp7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.767089 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8f8f4fc0-e759-433b-835a-f2c0db79850f" (UID: "8f8f4fc0-e759-433b-835a-f2c0db79850f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.815012 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-config-data" (OuterVolumeSpecName: "config-data") pod "8f8f4fc0-e759-433b-835a-f2c0db79850f" (UID: "8f8f4fc0-e759-433b-835a-f2c0db79850f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.832152 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9vp7\" (UniqueName: \"kubernetes.io/projected/8f8f4fc0-e759-433b-835a-f2c0db79850f-kube-api-access-r9vp7\") on node \"crc\" DevicePath \"\"" Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.832188 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.832200 4881 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 11 08:45:06 crc kubenswrapper[4881]: I1211 08:45:06.832211 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f8f4fc0-e759-433b-835a-f2c0db79850f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.038054 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp"] Dec 11 08:45:07 crc kubenswrapper[4881]: E1211 08:45:07.038661 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f8f4fc0-e759-433b-835a-f2c0db79850f" containerName="heat-engine" Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.038692 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f8f4fc0-e759-433b-835a-f2c0db79850f" containerName="heat-engine" Dec 11 08:45:07 crc kubenswrapper[4881]: E1211 08:45:07.038763 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cde02f48-eb61-4053-b321-3ab152bafeaa" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.038776 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="cde02f48-eb61-4053-b321-3ab152bafeaa" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 
08:45:07.039063 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="cde02f48-eb61-4053-b321-3ab152bafeaa" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.039109 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f8f4fc0-e759-433b-835a-f2c0db79850f" containerName="heat-engine" Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.040651 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp" Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.044475 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.044830 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72" Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.044946 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.045101 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.063240 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp"] Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.141956 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvqp6\" (UniqueName: \"kubernetes.io/projected/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-kube-api-access-wvqp6\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bfzwp\" (UID: \"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp" Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.142068 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bfzwp\" (UID: \"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp" Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.142176 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bfzwp\" (UID: \"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp" Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.154017 4881 util.go:48] "No ready sandbox for pod can be found. 
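
Before admitting the replacement pods, the CPU and memory managers sweep out per-container state left behind by pods that no longer exist; that is what the "RemoveStaleState" and "Deleted CPUSet assignment" pairs above record. A schematic of such a sweep over a checkpointed assignment map, with invented types for illustration rather than the kubelet's actual data structures:

    package main

    import "fmt"

    type key struct{ podUID, container string }

    // removeStaleState deletes any assignment whose pod is no longer active,
    // in the spirit of the cpu_manager/state_mem lines above.
    func removeStaleState(assignments map[key]string, active map[string]bool) {
        for k := range assignments {
            if !active[k.podUID] {
                fmt.Printf("RemoveStaleState: removing container %q (pod %s)\n", k.container, k.podUID)
                delete(assignments, k)
            }
        }
    }

    func main() {
        assignments := map[key]string{
            {"8f8f4fc0", "heat-engine"}: "cpuset 0-1", // stale: pod already deleted
            {"cb0fb1c6", "aodh-db-sync"}: "cpuset 2-3", // still active
        }
        removeStaleState(assignments, map[string]bool{"cb0fb1c6": true})
        fmt.Println("remaining assignments:", len(assignments))
    }
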
Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.154005 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-d56bd9469-fm4xb" event={"ID":"8f8f4fc0-e759-433b-835a-f2c0db79850f","Type":"ContainerDied","Data":"390d424708a7d4a7dfb5154fd7148ee8b9df67199b2143b9fc78c1855fea8cd7"}
Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.154169 4881 scope.go:117] "RemoveContainer" containerID="9702f55e0be53bfb061d47124237f59ade3b5b78113b501d7ba47d3f8fac1afa"
Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.156762 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7" event={"ID":"79fedc84-0e64-4f6f-82a1-ce37cf2e9304","Type":"ContainerStarted","Data":"afd3a472892b2a696e9f1749de8addbbc1347f2096dae11bfcee21112b65a8ea"}
Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.157070 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7" event={"ID":"79fedc84-0e64-4f6f-82a1-ce37cf2e9304","Type":"ContainerStarted","Data":"f1eba3f71eac3d89c081e42d88d94321097e477eaf9f039eec57568b5433af0c"}
Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.191227 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-d56bd9469-fm4xb"]
Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.203020 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-d56bd9469-fm4xb"]
Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.203459 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7" podStartSLOduration=4.203447832 podStartE2EDuration="4.203447832s" podCreationTimestamp="2025-12-11 08:45:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 08:45:07.187932099 +0000 UTC m=+1755.565300806" watchObservedRunningTime="2025-12-11 08:45:07.203447832 +0000 UTC m=+1755.580816529"
Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.246327 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvqp6\" (UniqueName: \"kubernetes.io/projected/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-kube-api-access-wvqp6\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bfzwp\" (UID: \"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp"
Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.246457 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bfzwp\" (UID: \"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp"
Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.246505 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bfzwp\" (UID: \"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp"
Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.257389 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bfzwp\" (UID: \"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp"
Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.257389 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bfzwp\" (UID: \"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp"
Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.263236 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvqp6\" (UniqueName: \"kubernetes.io/projected/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-kube-api-access-wvqp6\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bfzwp\" (UID: \"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp"
Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.370507 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp"
Dec 11 08:45:07 crc kubenswrapper[4881]: I1211 08:45:07.971691 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp"]
Dec 11 08:45:07 crc kubenswrapper[4881]: W1211 08:45:07.984207 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5c0b2b12_1f8f_4bd7_a3ea_3a078f43f178.slice/crio-310b87c8f854bb0840e23a71ec1b99b43a8661311aaf8dd2a17f27ba7b4fe878 WatchSource:0}: Error finding container 310b87c8f854bb0840e23a71ec1b99b43a8661311aaf8dd2a17f27ba7b4fe878: Status 404 returned error can't find the container with id 310b87c8f854bb0840e23a71ec1b99b43a8661311aaf8dd2a17f27ba7b4fe878
Dec 11 08:45:08 crc kubenswrapper[4881]: I1211 08:45:08.172640 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp" event={"ID":"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178","Type":"ContainerStarted","Data":"310b87c8f854bb0840e23a71ec1b99b43a8661311aaf8dd2a17f27ba7b4fe878"}
Dec 11 08:45:08 crc kubenswrapper[4881]: I1211 08:45:08.176212 4881 generic.go:334] "Generic (PLEG): container finished" podID="79fedc84-0e64-4f6f-82a1-ce37cf2e9304" containerID="afd3a472892b2a696e9f1749de8addbbc1347f2096dae11bfcee21112b65a8ea" exitCode=0
Dec 11 08:45:08 crc kubenswrapper[4881]: I1211 08:45:08.176256 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7" event={"ID":"79fedc84-0e64-4f6f-82a1-ce37cf2e9304","Type":"ContainerDied","Data":"afd3a472892b2a696e9f1749de8addbbc1347f2096dae11bfcee21112b65a8ea"}
Dec 11 08:45:09 crc kubenswrapper[4881]: I1211 08:45:09.006231 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9"
Dec 11 08:45:09 crc kubenswrapper[4881]: E1211 08:45:09.006598 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:45:09 crc kubenswrapper[4881]: I1211 08:45:09.040481 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f8f4fc0-e759-433b-835a-f2c0db79850f" path="/var/lib/kubelet/pods/8f8f4fc0-e759-433b-835a-f2c0db79850f/volumes"
Dec 11 08:45:12 crc kubenswrapper[4881]: I1211 08:45:12.128258 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7"
Dec 11 08:45:12 crc kubenswrapper[4881]: I1211 08:45:12.185912 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-config-volume\") pod \"79fedc84-0e64-4f6f-82a1-ce37cf2e9304\" (UID: \"79fedc84-0e64-4f6f-82a1-ce37cf2e9304\") "
Dec 11 08:45:12 crc kubenswrapper[4881]: I1211 08:45:12.186044 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-secret-volume\") pod \"79fedc84-0e64-4f6f-82a1-ce37cf2e9304\" (UID: \"79fedc84-0e64-4f6f-82a1-ce37cf2e9304\") "
Dec 11 08:45:12 crc kubenswrapper[4881]: I1211 08:45:12.186196 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g48r4\" (UniqueName: \"kubernetes.io/projected/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-kube-api-access-g48r4\") pod \"79fedc84-0e64-4f6f-82a1-ce37cf2e9304\" (UID: \"79fedc84-0e64-4f6f-82a1-ce37cf2e9304\") "
Dec 11 08:45:12 crc kubenswrapper[4881]: I1211 08:45:12.187464 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-config-volume" (OuterVolumeSpecName: "config-volume") pod "79fedc84-0e64-4f6f-82a1-ce37cf2e9304" (UID: "79fedc84-0e64-4f6f-82a1-ce37cf2e9304"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:45:12 crc kubenswrapper[4881]: I1211 08:45:12.197660 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-kube-api-access-g48r4" (OuterVolumeSpecName: "kube-api-access-g48r4") pod "79fedc84-0e64-4f6f-82a1-ce37cf2e9304" (UID: "79fedc84-0e64-4f6f-82a1-ce37cf2e9304"). InnerVolumeSpecName "kube-api-access-g48r4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:45:12 crc kubenswrapper[4881]: I1211 08:45:12.198153 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "79fedc84-0e64-4f6f-82a1-ce37cf2e9304" (UID: "79fedc84-0e64-4f6f-82a1-ce37cf2e9304"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:45:12 crc kubenswrapper[4881]: I1211 08:45:12.229614 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7" event={"ID":"79fedc84-0e64-4f6f-82a1-ce37cf2e9304","Type":"ContainerDied","Data":"f1eba3f71eac3d89c081e42d88d94321097e477eaf9f039eec57568b5433af0c"}
Dec 11 08:45:12 crc kubenswrapper[4881]: I1211 08:45:12.229640 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7"
Dec 11 08:45:12 crc kubenswrapper[4881]: I1211 08:45:12.229664 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1eba3f71eac3d89c081e42d88d94321097e477eaf9f039eec57568b5433af0c"
Dec 11 08:45:12 crc kubenswrapper[4881]: I1211 08:45:12.289423 4881 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-secret-volume\") on node \"crc\" DevicePath \"\""
Dec 11 08:45:12 crc kubenswrapper[4881]: I1211 08:45:12.289457 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g48r4\" (UniqueName: \"kubernetes.io/projected/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-kube-api-access-g48r4\") on node \"crc\" DevicePath \"\""
Dec 11 08:45:12 crc kubenswrapper[4881]: I1211 08:45:12.289468 4881 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/79fedc84-0e64-4f6f-82a1-ce37cf2e9304-config-volume\") on node \"crc\" DevicePath \"\""
Dec 11 08:45:13 crc kubenswrapper[4881]: I1211 08:45:13.241362 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-mn4r5" event={"ID":"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05","Type":"ContainerStarted","Data":"d11af08761a00ae5f85d754efd0273b40889750d8d3d535fadc9b6f1094dc844"}
Dec 11 08:45:13 crc kubenswrapper[4881]: I1211 08:45:13.247465 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp" event={"ID":"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178","Type":"ContainerStarted","Data":"e433f43e5a836715275b64b127fb093e6ad8fb68fb92c455fc4730901ce6b592"}
Dec 11 08:45:13 crc kubenswrapper[4881]: I1211 08:45:13.262557 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-mn4r5" podStartSLOduration=25.172360074 podStartE2EDuration="31.26253396s" podCreationTimestamp="2025-12-11 08:44:42 +0000 UTC" firstStartedPulling="2025-12-11 08:45:05.738799604 +0000 UTC m=+1754.116168301" lastFinishedPulling="2025-12-11 08:45:11.82897349 +0000 UTC m=+1760.206342187" observedRunningTime="2025-12-11 08:45:13.258582835 +0000 UTC m=+1761.635951532" watchObservedRunningTime="2025-12-11 08:45:13.26253396 +0000 UTC m=+1761.639902657"
Dec 11 08:45:13 crc kubenswrapper[4881]: I1211 08:45:13.288183 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp" podStartSLOduration=2.446745531 podStartE2EDuration="6.288162346s" podCreationTimestamp="2025-12-11 08:45:07 +0000 UTC" firstStartedPulling="2025-12-11 08:45:07.987810171 +0000 UTC m=+1756.365178868" lastFinishedPulling="2025-12-11 08:45:11.829226986 +0000 UTC m=+1760.206595683" observedRunningTime="2025-12-11 08:45:13.272623162 +0000 UTC m=+1761.649991869" watchObservedRunningTime="2025-12-11 08:45:13.288162346 +0000 UTC m=+1761.665531043"
Dec 11 08:45:15 crc kubenswrapper[4881]: I1211 08:45:15.596673 4881 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-td2hv container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.19:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 11 08:45:15 crc kubenswrapper[4881]: I1211 08:45:15.597008 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-td2hv" podUID="8ecef833-b914-464d-a395-49bb7f66a180" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.19:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Dec 11 08:45:17 crc kubenswrapper[4881]: I1211 08:45:17.311997 4881 generic.go:334] "Generic (PLEG): container finished" podID="5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178" containerID="e433f43e5a836715275b64b127fb093e6ad8fb68fb92c455fc4730901ce6b592" exitCode=0
Dec 11 08:45:17 crc kubenswrapper[4881]: I1211 08:45:17.312270 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp" event={"ID":"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178","Type":"ContainerDied","Data":"e433f43e5a836715275b64b127fb093e6ad8fb68fb92c455fc4730901ce6b592"}
Dec 11 08:45:18 crc kubenswrapper[4881]: I1211 08:45:18.327878 4881 generic.go:334] "Generic (PLEG): container finished" podID="cb0fb1c6-fd5d-4334-9584-efb1c5c75d05" containerID="d11af08761a00ae5f85d754efd0273b40889750d8d3d535fadc9b6f1094dc844" exitCode=0
Dec 11 08:45:18 crc kubenswrapper[4881]: I1211 08:45:18.327969 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-mn4r5" event={"ID":"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05","Type":"ContainerDied","Data":"d11af08761a00ae5f85d754efd0273b40889750d8d3d535fadc9b6f1094dc844"}
Dec 11 08:45:18 crc kubenswrapper[4881]: I1211 08:45:18.815452 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp"
Dec 11 08:45:18 crc kubenswrapper[4881]: I1211 08:45:18.970018 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvqp6\" (UniqueName: \"kubernetes.io/projected/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-kube-api-access-wvqp6\") pod \"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178\" (UID: \"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178\") "
Dec 11 08:45:18 crc kubenswrapper[4881]: I1211 08:45:18.970119 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-inventory\") pod \"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178\" (UID: \"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178\") "
Dec 11 08:45:18 crc kubenswrapper[4881]: I1211 08:45:18.970313 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-ssh-key\") pod \"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178\" (UID: \"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178\") "
Dec 11 08:45:18 crc kubenswrapper[4881]: I1211 08:45:18.985400 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-kube-api-access-wvqp6" (OuterVolumeSpecName: "kube-api-access-wvqp6") pod "5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178" (UID: "5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178"). InnerVolumeSpecName "kube-api-access-wvqp6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.003179 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178" (UID: "5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.003679 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-inventory" (OuterVolumeSpecName: "inventory") pod "5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178" (UID: "5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.073379 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.073414 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvqp6\" (UniqueName: \"kubernetes.io/projected/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-kube-api-access-wvqp6\") on node \"crc\" DevicePath \"\""
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.073426 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178-inventory\") on node \"crc\" DevicePath \"\""
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.344254 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.344541 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bfzwp" event={"ID":"5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178","Type":"ContainerDied","Data":"310b87c8f854bb0840e23a71ec1b99b43a8661311aaf8dd2a17f27ba7b4fe878"}
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.344701 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="310b87c8f854bb0840e23a71ec1b99b43a8661311aaf8dd2a17f27ba7b4fe878"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.421260 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"]
Dec 11 08:45:19 crc kubenswrapper[4881]: E1211 08:45:19.421780 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178" containerName="redhat-edpm-deployment-openstack-edpm-ipam"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.421807 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178" containerName="redhat-edpm-deployment-openstack-edpm-ipam"
Dec 11 08:45:19 crc kubenswrapper[4881]: E1211 08:45:19.421827 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79fedc84-0e64-4f6f-82a1-ce37cf2e9304" containerName="collect-profiles"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.421834 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="79fedc84-0e64-4f6f-82a1-ce37cf2e9304" containerName="collect-profiles"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.422036 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="79fedc84-0e64-4f6f-82a1-ce37cf2e9304" containerName="collect-profiles"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.422049 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178" containerName="redhat-edpm-deployment-openstack-edpm-ipam"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.422857 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.428402 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.428470 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.428590 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.428627 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.450106 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"]
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.488389 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr\" (UID: \"e741b94a-ed71-4819-ba06-943aa25aaaf8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.488482 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr\" (UID: \"e741b94a-ed71-4819-ba06-943aa25aaaf8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.488578 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4v4dc\" (UniqueName: \"kubernetes.io/projected/e741b94a-ed71-4819-ba06-943aa25aaaf8-kube-api-access-4v4dc\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr\" (UID: \"e741b94a-ed71-4819-ba06-943aa25aaaf8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.488681 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr\" (UID: \"e741b94a-ed71-4819-ba06-943aa25aaaf8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.591586 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr\" (UID: \"e741b94a-ed71-4819-ba06-943aa25aaaf8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.591645 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr\" (UID: \"e741b94a-ed71-4819-ba06-943aa25aaaf8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.591704 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4v4dc\" (UniqueName: \"kubernetes.io/projected/e741b94a-ed71-4819-ba06-943aa25aaaf8-kube-api-access-4v4dc\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr\" (UID: \"e741b94a-ed71-4819-ba06-943aa25aaaf8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.591748 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr\" (UID: \"e741b94a-ed71-4819-ba06-943aa25aaaf8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.611703 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr\" (UID: \"e741b94a-ed71-4819-ba06-943aa25aaaf8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.622066 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr\" (UID: \"e741b94a-ed71-4819-ba06-943aa25aaaf8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.625067 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4v4dc\" (UniqueName: \"kubernetes.io/projected/e741b94a-ed71-4819-ba06-943aa25aaaf8-kube-api-access-4v4dc\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr\" (UID: \"e741b94a-ed71-4819-ba06-943aa25aaaf8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.638299 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr\" (UID: \"e741b94a-ed71-4819-ba06-943aa25aaaf8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.754388 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"
Dec 11 08:45:19 crc kubenswrapper[4881]: I1211 08:45:19.881939 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-mn4r5"
Need to start a new one" pod="openstack/aodh-db-sync-mn4r5" Dec 11 08:45:20 crc kubenswrapper[4881]: I1211 08:45:20.004613 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-config-data\") pod \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\" (UID: \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\") " Dec 11 08:45:20 crc kubenswrapper[4881]: I1211 08:45:20.005082 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-scripts\") pod \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\" (UID: \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\") " Dec 11 08:45:20 crc kubenswrapper[4881]: I1211 08:45:20.005143 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-combined-ca-bundle\") pod \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\" (UID: \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\") " Dec 11 08:45:20 crc kubenswrapper[4881]: I1211 08:45:20.005238 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vzzd6\" (UniqueName: \"kubernetes.io/projected/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-kube-api-access-vzzd6\") pod \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\" (UID: \"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05\") " Dec 11 08:45:20 crc kubenswrapper[4881]: I1211 08:45:20.009851 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-scripts" (OuterVolumeSpecName: "scripts") pod "cb0fb1c6-fd5d-4334-9584-efb1c5c75d05" (UID: "cb0fb1c6-fd5d-4334-9584-efb1c5c75d05"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:45:20 crc kubenswrapper[4881]: I1211 08:45:20.011542 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-kube-api-access-vzzd6" (OuterVolumeSpecName: "kube-api-access-vzzd6") pod "cb0fb1c6-fd5d-4334-9584-efb1c5c75d05" (UID: "cb0fb1c6-fd5d-4334-9584-efb1c5c75d05"). InnerVolumeSpecName "kube-api-access-vzzd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:45:20 crc kubenswrapper[4881]: I1211 08:45:20.034041 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-config-data" (OuterVolumeSpecName: "config-data") pod "cb0fb1c6-fd5d-4334-9584-efb1c5c75d05" (UID: "cb0fb1c6-fd5d-4334-9584-efb1c5c75d05"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:45:20 crc kubenswrapper[4881]: I1211 08:45:20.052282 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cb0fb1c6-fd5d-4334-9584-efb1c5c75d05" (UID: "cb0fb1c6-fd5d-4334-9584-efb1c5c75d05"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:45:20 crc kubenswrapper[4881]: I1211 08:45:20.110249 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vzzd6\" (UniqueName: \"kubernetes.io/projected/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-kube-api-access-vzzd6\") on node \"crc\" DevicePath \"\"" Dec 11 08:45:20 crc kubenswrapper[4881]: I1211 08:45:20.110278 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:45:20 crc kubenswrapper[4881]: I1211 08:45:20.110287 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:45:20 crc kubenswrapper[4881]: I1211 08:45:20.110297 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:45:20 crc kubenswrapper[4881]: I1211 08:45:20.273497 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"] Dec 11 08:45:20 crc kubenswrapper[4881]: I1211 08:45:20.355570 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr" event={"ID":"e741b94a-ed71-4819-ba06-943aa25aaaf8","Type":"ContainerStarted","Data":"d192cb2536a3576b69b8e7044f9506a1d0cf2189697f817fb37f0f3891698dbe"} Dec 11 08:45:20 crc kubenswrapper[4881]: I1211 08:45:20.357603 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-mn4r5" event={"ID":"cb0fb1c6-fd5d-4334-9584-efb1c5c75d05","Type":"ContainerDied","Data":"092c2b3388bd39e3964463faf140e9bf9f5c3433efe10f39c513ebb05264b02c"} Dec 11 08:45:20 crc kubenswrapper[4881]: I1211 08:45:20.357637 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="092c2b3388bd39e3964463faf140e9bf9f5c3433efe10f39c513ebb05264b02c" Dec 11 08:45:20 crc kubenswrapper[4881]: I1211 08:45:20.357694 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-mn4r5" Dec 11 08:45:21 crc kubenswrapper[4881]: I1211 08:45:21.370573 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr" event={"ID":"e741b94a-ed71-4819-ba06-943aa25aaaf8","Type":"ContainerStarted","Data":"db530b92405f9baf5f6cf5e7132a65bf418aa76ba1b57d73abb4c67e1dc385c6"} Dec 11 08:45:21 crc kubenswrapper[4881]: I1211 08:45:21.388452 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr" podStartSLOduration=1.8232415130000001 podStartE2EDuration="2.388427255s" podCreationTimestamp="2025-12-11 08:45:19 +0000 UTC" firstStartedPulling="2025-12-11 08:45:20.273860971 +0000 UTC m=+1768.651229668" lastFinishedPulling="2025-12-11 08:45:20.839046723 +0000 UTC m=+1769.216415410" observedRunningTime="2025-12-11 08:45:21.385633488 +0000 UTC m=+1769.763002195" watchObservedRunningTime="2025-12-11 08:45:21.388427255 +0000 UTC m=+1769.765795952" Dec 11 08:45:22 crc kubenswrapper[4881]: I1211 08:45:22.297090 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Dec 11 08:45:22 crc kubenswrapper[4881]: I1211 08:45:22.298209 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="338db346-be5c-4382-9404-29d345bba595" containerName="aodh-api" containerID="cri-o://51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471" gracePeriod=30 Dec 11 08:45:22 crc kubenswrapper[4881]: I1211 08:45:22.299012 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="338db346-be5c-4382-9404-29d345bba595" containerName="aodh-notifier" containerID="cri-o://a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9" gracePeriod=30 Dec 11 08:45:22 crc kubenswrapper[4881]: I1211 08:45:22.299112 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="338db346-be5c-4382-9404-29d345bba595" containerName="aodh-evaluator" containerID="cri-o://92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605" gracePeriod=30 Dec 11 08:45:22 crc kubenswrapper[4881]: I1211 08:45:22.299206 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="338db346-be5c-4382-9404-29d345bba595" containerName="aodh-listener" containerID="cri-o://9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8" gracePeriod=30 Dec 11 08:45:23 crc kubenswrapper[4881]: I1211 08:45:23.016288 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9" Dec 11 08:45:23 crc kubenswrapper[4881]: E1211 08:45:23.016742 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:45:23 crc kubenswrapper[4881]: I1211 08:45:23.396997 4881 generic.go:334] "Generic (PLEG): container finished" podID="338db346-be5c-4382-9404-29d345bba595" containerID="92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605" exitCode=0 Dec 11 08:45:23 crc kubenswrapper[4881]: I1211 08:45:23.397235 4881 
generic.go:334] "Generic (PLEG): container finished" podID="338db346-be5c-4382-9404-29d345bba595" containerID="51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471" exitCode=0 Dec 11 08:45:23 crc kubenswrapper[4881]: I1211 08:45:23.397073 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"338db346-be5c-4382-9404-29d345bba595","Type":"ContainerDied","Data":"92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605"} Dec 11 08:45:23 crc kubenswrapper[4881]: I1211 08:45:23.397274 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"338db346-be5c-4382-9404-29d345bba595","Type":"ContainerDied","Data":"51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471"} Dec 11 08:45:26 crc kubenswrapper[4881]: E1211 08:45:26.701891 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod338db346_be5c_4382_9404_29d345bba595.slice/crio-a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod338db346_be5c_4382_9404_29d345bba595.slice/crio-conmon-9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod338db346_be5c_4382_9404_29d345bba595.slice/crio-conmon-a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9.scope\": RecentStats: unable to find data in memory cache]" Dec 11 08:45:26 crc kubenswrapper[4881]: I1211 08:45:26.969166 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.086076 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcr7x\" (UniqueName: \"kubernetes.io/projected/338db346-be5c-4382-9404-29d345bba595-kube-api-access-fcr7x\") pod \"338db346-be5c-4382-9404-29d345bba595\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.086180 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-internal-tls-certs\") pod \"338db346-be5c-4382-9404-29d345bba595\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.086265 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-scripts\") pod \"338db346-be5c-4382-9404-29d345bba595\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.086310 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-combined-ca-bundle\") pod \"338db346-be5c-4382-9404-29d345bba595\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.086398 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-public-tls-certs\") pod \"338db346-be5c-4382-9404-29d345bba595\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.086459 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-config-data\") pod \"338db346-be5c-4382-9404-29d345bba595\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.098578 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-scripts" (OuterVolumeSpecName: "scripts") pod "338db346-be5c-4382-9404-29d345bba595" (UID: "338db346-be5c-4382-9404-29d345bba595"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.098849 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/338db346-be5c-4382-9404-29d345bba595-kube-api-access-fcr7x" (OuterVolumeSpecName: "kube-api-access-fcr7x") pod "338db346-be5c-4382-9404-29d345bba595" (UID: "338db346-be5c-4382-9404-29d345bba595"). InnerVolumeSpecName "kube-api-access-fcr7x". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.161271 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "338db346-be5c-4382-9404-29d345bba595" (UID: "338db346-be5c-4382-9404-29d345bba595"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.191666 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "338db346-be5c-4382-9404-29d345bba595" (UID: "338db346-be5c-4382-9404-29d345bba595"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.192003 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-internal-tls-certs\") pod \"338db346-be5c-4382-9404-29d345bba595\" (UID: \"338db346-be5c-4382-9404-29d345bba595\") " Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.193567 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcr7x\" (UniqueName: \"kubernetes.io/projected/338db346-be5c-4382-9404-29d345bba595-kube-api-access-fcr7x\") on node \"crc\" DevicePath \"\"" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.193757 4881 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.193893 4881 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-public-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:45:27 crc kubenswrapper[4881]: W1211 08:45:27.194195 4881 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/338db346-be5c-4382-9404-29d345bba595/volumes/kubernetes.io~secret/internal-tls-certs Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.194269 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "338db346-be5c-4382-9404-29d345bba595" (UID: "338db346-be5c-4382-9404-29d345bba595"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.241595 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "338db346-be5c-4382-9404-29d345bba595" (UID: "338db346-be5c-4382-9404-29d345bba595"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.243687 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-config-data" (OuterVolumeSpecName: "config-data") pod "338db346-be5c-4382-9404-29d345bba595" (UID: "338db346-be5c-4382-9404-29d345bba595"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.296525 4881 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.296564 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.296577 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/338db346-be5c-4382-9404-29d345bba595-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.454314 4881 generic.go:334] "Generic (PLEG): container finished" podID="338db346-be5c-4382-9404-29d345bba595" containerID="9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8" exitCode=0 Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.454605 4881 generic.go:334] "Generic (PLEG): container finished" podID="338db346-be5c-4382-9404-29d345bba595" containerID="a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9" exitCode=0 Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.454484 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.454417 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"338db346-be5c-4382-9404-29d345bba595","Type":"ContainerDied","Data":"9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8"} Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.454945 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"338db346-be5c-4382-9404-29d345bba595","Type":"ContainerDied","Data":"a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9"} Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.454967 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"338db346-be5c-4382-9404-29d345bba595","Type":"ContainerDied","Data":"32fdb8a9454acf728711e177e91072eb325ebac82c3a589640ac0a10111ce07e"} Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.454982 4881 scope.go:117] "RemoveContainer" containerID="9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.497755 4881 scope.go:117] "RemoveContainer" containerID="a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.498886 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.545289 4881 scope.go:117] "RemoveContainer" containerID="92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.545888 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.590450 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Dec 11 08:45:27 crc kubenswrapper[4881]: E1211 08:45:27.591124 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="338db346-be5c-4382-9404-29d345bba595" 
containerName="aodh-notifier" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.591152 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="338db346-be5c-4382-9404-29d345bba595" containerName="aodh-notifier" Dec 11 08:45:27 crc kubenswrapper[4881]: E1211 08:45:27.591185 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="338db346-be5c-4382-9404-29d345bba595" containerName="aodh-evaluator" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.591196 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="338db346-be5c-4382-9404-29d345bba595" containerName="aodh-evaluator" Dec 11 08:45:27 crc kubenswrapper[4881]: E1211 08:45:27.591240 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="338db346-be5c-4382-9404-29d345bba595" containerName="aodh-listener" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.591248 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="338db346-be5c-4382-9404-29d345bba595" containerName="aodh-listener" Dec 11 08:45:27 crc kubenswrapper[4881]: E1211 08:45:27.591261 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="338db346-be5c-4382-9404-29d345bba595" containerName="aodh-api" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.591267 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="338db346-be5c-4382-9404-29d345bba595" containerName="aodh-api" Dec 11 08:45:27 crc kubenswrapper[4881]: E1211 08:45:27.591277 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb0fb1c6-fd5d-4334-9584-efb1c5c75d05" containerName="aodh-db-sync" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.591282 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb0fb1c6-fd5d-4334-9584-efb1c5c75d05" containerName="aodh-db-sync" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.591528 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="338db346-be5c-4382-9404-29d345bba595" containerName="aodh-evaluator" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.591552 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="338db346-be5c-4382-9404-29d345bba595" containerName="aodh-notifier" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.591561 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="338db346-be5c-4382-9404-29d345bba595" containerName="aodh-listener" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.591573 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="338db346-be5c-4382-9404-29d345bba595" containerName="aodh-api" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.591592 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb0fb1c6-fd5d-4334-9584-efb1c5c75d05" containerName="aodh-db-sync" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.593940 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.597029 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.597258 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.597636 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-gqtm2" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.597710 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.597565 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.605104 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.650032 4881 scope.go:117] "RemoveContainer" containerID="51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.681005 4881 scope.go:117] "RemoveContainer" containerID="9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8" Dec 11 08:45:27 crc kubenswrapper[4881]: E1211 08:45:27.681691 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8\": container with ID starting with 9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8 not found: ID does not exist" containerID="9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.681733 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8"} err="failed to get container status \"9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8\": rpc error: code = NotFound desc = could not find container \"9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8\": container with ID starting with 9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8 not found: ID does not exist" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.681759 4881 scope.go:117] "RemoveContainer" containerID="a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9" Dec 11 08:45:27 crc kubenswrapper[4881]: E1211 08:45:27.682238 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9\": container with ID starting with a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9 not found: ID does not exist" containerID="a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.682264 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9"} err="failed to get container status \"a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9\": rpc error: code = NotFound desc = could not find container \"a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9\": 
container with ID starting with a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9 not found: ID does not exist" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.682279 4881 scope.go:117] "RemoveContainer" containerID="92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605" Dec 11 08:45:27 crc kubenswrapper[4881]: E1211 08:45:27.682526 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605\": container with ID starting with 92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605 not found: ID does not exist" containerID="92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.682549 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605"} err="failed to get container status \"92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605\": rpc error: code = NotFound desc = could not find container \"92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605\": container with ID starting with 92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605 not found: ID does not exist" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.682562 4881 scope.go:117] "RemoveContainer" containerID="51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471" Dec 11 08:45:27 crc kubenswrapper[4881]: E1211 08:45:27.682831 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471\": container with ID starting with 51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471 not found: ID does not exist" containerID="51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.682860 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471"} err="failed to get container status \"51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471\": rpc error: code = NotFound desc = could not find container \"51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471\": container with ID starting with 51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471 not found: ID does not exist" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.682874 4881 scope.go:117] "RemoveContainer" containerID="9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.683386 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8"} err="failed to get container status \"9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8\": rpc error: code = NotFound desc = could not find container \"9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8\": container with ID starting with 9319640f9dc0eb3ae69dbd3e5ef0eecbac06a70502481fc4c5c4b64aaa45d4a8 not found: ID does not exist" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.683411 4881 scope.go:117] "RemoveContainer" containerID="a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9" 
Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.683735 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9"} err="failed to get container status \"a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9\": rpc error: code = NotFound desc = could not find container \"a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9\": container with ID starting with a6691685fe3ec19d9f53dbfb0791524d93e4a8c82f6e4fee143db74ec34ef4e9 not found: ID does not exist" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.683790 4881 scope.go:117] "RemoveContainer" containerID="92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.684087 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605"} err="failed to get container status \"92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605\": rpc error: code = NotFound desc = could not find container \"92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605\": container with ID starting with 92821f7a096c136ffc7e64eec1c3ac32209131960744bad0dc1260ba16083605 not found: ID does not exist" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.684120 4881 scope.go:117] "RemoveContainer" containerID="51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.684426 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471"} err="failed to get container status \"51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471\": rpc error: code = NotFound desc = could not find container \"51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471\": container with ID starting with 51b95edd797ca74ba6373144cec82c5dfe9a83a2ad44219685a75b07beea5471 not found: ID does not exist" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.710533 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e25df4be-6d20-469b-999e-ae4ffe346be8-config-data\") pod \"aodh-0\" (UID: \"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.710590 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjcrv\" (UniqueName: \"kubernetes.io/projected/e25df4be-6d20-469b-999e-ae4ffe346be8-kube-api-access-xjcrv\") pod \"aodh-0\" (UID: \"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.710749 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e25df4be-6d20-469b-999e-ae4ffe346be8-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.710806 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e25df4be-6d20-469b-999e-ae4ffe346be8-public-tls-certs\") pod \"aodh-0\" (UID: 
\"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.710834 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e25df4be-6d20-469b-999e-ae4ffe346be8-scripts\") pod \"aodh-0\" (UID: \"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.710984 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e25df4be-6d20-469b-999e-ae4ffe346be8-internal-tls-certs\") pod \"aodh-0\" (UID: \"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.814083 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e25df4be-6d20-469b-999e-ae4ffe346be8-config-data\") pod \"aodh-0\" (UID: \"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.814354 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjcrv\" (UniqueName: \"kubernetes.io/projected/e25df4be-6d20-469b-999e-ae4ffe346be8-kube-api-access-xjcrv\") pod \"aodh-0\" (UID: \"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.814472 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e25df4be-6d20-469b-999e-ae4ffe346be8-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.814518 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e25df4be-6d20-469b-999e-ae4ffe346be8-public-tls-certs\") pod \"aodh-0\" (UID: \"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.814552 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e25df4be-6d20-469b-999e-ae4ffe346be8-scripts\") pod \"aodh-0\" (UID: \"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.814593 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e25df4be-6d20-469b-999e-ae4ffe346be8-internal-tls-certs\") pod \"aodh-0\" (UID: \"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.818439 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e25df4be-6d20-469b-999e-ae4ffe346be8-internal-tls-certs\") pod \"aodh-0\" (UID: \"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.818954 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e25df4be-6d20-469b-999e-ae4ffe346be8-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 
crc kubenswrapper[4881]: I1211 08:45:27.820966 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e25df4be-6d20-469b-999e-ae4ffe346be8-public-tls-certs\") pod \"aodh-0\" (UID: \"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.821207 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e25df4be-6d20-469b-999e-ae4ffe346be8-config-data\") pod \"aodh-0\" (UID: \"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.821917 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e25df4be-6d20-469b-999e-ae4ffe346be8-scripts\") pod \"aodh-0\" (UID: \"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.845175 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjcrv\" (UniqueName: \"kubernetes.io/projected/e25df4be-6d20-469b-999e-ae4ffe346be8-kube-api-access-xjcrv\") pod \"aodh-0\" (UID: \"e25df4be-6d20-469b-999e-ae4ffe346be8\") " pod="openstack/aodh-0" Dec 11 08:45:27 crc kubenswrapper[4881]: I1211 08:45:27.953291 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Dec 11 08:45:28 crc kubenswrapper[4881]: I1211 08:45:28.426302 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Dec 11 08:45:28 crc kubenswrapper[4881]: I1211 08:45:28.470591 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e25df4be-6d20-469b-999e-ae4ffe346be8","Type":"ContainerStarted","Data":"fae5adfab3edcd342fbc45318d44d2bab161b319ef075ebefbff4a2aaccd7b8a"} Dec 11 08:45:29 crc kubenswrapper[4881]: I1211 08:45:29.023403 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="338db346-be5c-4382-9404-29d345bba595" path="/var/lib/kubelet/pods/338db346-be5c-4382-9404-29d345bba595/volumes" Dec 11 08:45:29 crc kubenswrapper[4881]: I1211 08:45:29.484040 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e25df4be-6d20-469b-999e-ae4ffe346be8","Type":"ContainerStarted","Data":"8c2d8c55a14eea9f3b7408495702ccffdd1ff1d1a73f0bdf9f4f241399a38ba8"} Dec 11 08:45:31 crc kubenswrapper[4881]: I1211 08:45:31.506115 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e25df4be-6d20-469b-999e-ae4ffe346be8","Type":"ContainerStarted","Data":"d8facfa9a52f36596608de213f18181ee3b73be4106aca07885c2bb40cc47dfc"} Dec 11 08:45:32 crc kubenswrapper[4881]: I1211 08:45:32.520762 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e25df4be-6d20-469b-999e-ae4ffe346be8","Type":"ContainerStarted","Data":"0c00094365fc2be8f5ae3c9b1b992a09a700731a7f75e9a31af39e42b61654a6"} Dec 11 08:45:33 crc kubenswrapper[4881]: I1211 08:45:33.534188 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e25df4be-6d20-469b-999e-ae4ffe346be8","Type":"ContainerStarted","Data":"5851846eb34730dd58d4138cc9cf71ce87db53644737dcd6d5f93d2ab2a471c0"} Dec 11 08:45:33 crc kubenswrapper[4881]: I1211 08:45:33.560037 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=1.9346890220000001 
podStartE2EDuration="6.560019315s" podCreationTimestamp="2025-12-11 08:45:27 +0000 UTC" firstStartedPulling="2025-12-11 08:45:28.434751628 +0000 UTC m=+1776.812120325" lastFinishedPulling="2025-12-11 08:45:33.060081921 +0000 UTC m=+1781.437450618" observedRunningTime="2025-12-11 08:45:33.55731683 +0000 UTC m=+1781.934685527" watchObservedRunningTime="2025-12-11 08:45:33.560019315 +0000 UTC m=+1781.937388012" Dec 11 08:45:37 crc kubenswrapper[4881]: I1211 08:45:37.006024 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9" Dec 11 08:45:37 crc kubenswrapper[4881]: E1211 08:45:37.007638 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:45:50 crc kubenswrapper[4881]: I1211 08:45:50.006644 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9" Dec 11 08:45:50 crc kubenswrapper[4881]: E1211 08:45:50.008358 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:46:03 crc kubenswrapper[4881]: I1211 08:46:03.016233 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9" Dec 11 08:46:03 crc kubenswrapper[4881]: E1211 08:46:03.017125 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:46:16 crc kubenswrapper[4881]: I1211 08:46:16.006316 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9" Dec 11 08:46:16 crc kubenswrapper[4881]: E1211 08:46:16.007045 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:46:29 crc kubenswrapper[4881]: I1211 08:46:29.006319 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9" Dec 11 08:46:29 crc kubenswrapper[4881]: E1211 08:46:29.007155 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:46:30 crc kubenswrapper[4881]: I1211 08:46:30.050461 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-k7v7r"] Dec 11 08:46:30 crc kubenswrapper[4881]: I1211 08:46:30.066391 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-86k8z"] Dec 11 08:46:30 crc kubenswrapper[4881]: I1211 08:46:30.076556 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-9pp5h"] Dec 11 08:46:30 crc kubenswrapper[4881]: I1211 08:46:30.087848 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-9pp5h"] Dec 11 08:46:30 crc kubenswrapper[4881]: I1211 08:46:30.097966 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-86k8z"] Dec 11 08:46:30 crc kubenswrapper[4881]: I1211 08:46:30.108164 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-k7v7r"] Dec 11 08:46:31 crc kubenswrapper[4881]: I1211 08:46:31.023494 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="574b7634-a0fa-484a-b0c5-4f1a6541d3a9" path="/var/lib/kubelet/pods/574b7634-a0fa-484a-b0c5-4f1a6541d3a9/volumes" Dec 11 08:46:31 crc kubenswrapper[4881]: I1211 08:46:31.025134 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1" path="/var/lib/kubelet/pods/a99bf32a-aa0f-4726-8fa2-9f9d8c503cf1/volumes" Dec 11 08:46:31 crc kubenswrapper[4881]: I1211 08:46:31.025763 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb33f8d2-c5b8-485b-8be2-505ba688ddc0" path="/var/lib/kubelet/pods/cb33f8d2-c5b8-485b-8be2-505ba688ddc0/volumes" Dec 11 08:46:31 crc kubenswrapper[4881]: I1211 08:46:31.036316 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-grwv8"] Dec 11 08:46:31 crc kubenswrapper[4881]: I1211 08:46:31.049025 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-grwv8"] Dec 11 08:46:33 crc kubenswrapper[4881]: I1211 08:46:33.021287 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05cca6b4-3f02-4baf-8b58-3b7de77c5ec3" path="/var/lib/kubelet/pods/05cca6b4-3f02-4baf-8b58-3b7de77c5ec3/volumes" Dec 11 08:46:40 crc kubenswrapper[4881]: I1211 08:46:40.006698 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9" Dec 11 08:46:40 crc kubenswrapper[4881]: E1211 08:46:40.007709 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:46:41 crc kubenswrapper[4881]: I1211 08:46:41.043290 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-ee83-account-create-7zk6g"] Dec 11 08:46:41 crc kubenswrapper[4881]: I1211 08:46:41.062279 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-9bb0-account-create-8rxjm"] Dec 11 
08:46:41 crc kubenswrapper[4881]: I1211 08:46:41.073034 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-9bb0-account-create-8rxjm"]
Dec 11 08:46:41 crc kubenswrapper[4881]: I1211 08:46:41.085063 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-ee83-account-create-7zk6g"]
Dec 11 08:46:43 crc kubenswrapper[4881]: I1211 08:46:43.022999 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3718a33b-862b-4600-82d4-b54c568ae5a4" path="/var/lib/kubelet/pods/3718a33b-862b-4600-82d4-b54c568ae5a4/volumes"
Dec 11 08:46:43 crc kubenswrapper[4881]: I1211 08:46:43.023899 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73794710-ff03-415b-b6f7-48e86190e910" path="/var/lib/kubelet/pods/73794710-ff03-415b-b6f7-48e86190e910/volumes"
Dec 11 08:46:44 crc kubenswrapper[4881]: I1211 08:46:44.067651 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-b509-account-create-tcvfc"]
Dec 11 08:46:44 crc kubenswrapper[4881]: I1211 08:46:44.084811 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-0123-account-create-nxblk"]
Dec 11 08:46:44 crc kubenswrapper[4881]: I1211 08:46:44.103012 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-b509-account-create-tcvfc"]
Dec 11 08:46:44 crc kubenswrapper[4881]: I1211 08:46:44.116313 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-0123-account-create-nxblk"]
Dec 11 08:46:45 crc kubenswrapper[4881]: I1211 08:46:45.019605 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0f8242c-de63-4ad0-9164-d941d7a1d67c" path="/var/lib/kubelet/pods/c0f8242c-de63-4ad0-9164-d941d7a1d67c/volumes"
Dec 11 08:46:45 crc kubenswrapper[4881]: I1211 08:46:45.021064 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3" path="/var/lib/kubelet/pods/c528a5b2-04d9-4ec0-83a6-8fc7776fdcc3/volumes"
Dec 11 08:46:51 crc kubenswrapper[4881]: I1211 08:46:51.006784 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9"
Dec 11 08:46:51 crc kubenswrapper[4881]: E1211 08:46:51.007510 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:46:51 crc kubenswrapper[4881]: I1211 08:46:51.056463 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-8z5np"]
Dec 11 08:46:51 crc kubenswrapper[4881]: I1211 08:46:51.068367 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-ql9dq"]
Dec 11 08:46:51 crc kubenswrapper[4881]: I1211 08:46:51.079656 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-xznx2"]
Dec 11 08:46:51 crc kubenswrapper[4881]: I1211 08:46:51.093456 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-8z5np"]
Dec 11 08:46:51 crc kubenswrapper[4881]: I1211 08:46:51.103955 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-xznx2"]
Dec 11 08:46:51 crc kubenswrapper[4881]: I1211 08:46:51.115390 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-vwbg2"]
Dec 11 08:46:51 crc kubenswrapper[4881]: I1211 08:46:51.129778 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-ql9dq"]
Dec 11 08:46:51 crc kubenswrapper[4881]: I1211 08:46:51.144239 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-vwbg2"]
Dec 11 08:46:53 crc kubenswrapper[4881]: I1211 08:46:53.025491 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a84e1c9-f761-496c-aae3-6ec403f26898" path="/var/lib/kubelet/pods/2a84e1c9-f761-496c-aae3-6ec403f26898/volumes"
Dec 11 08:46:53 crc kubenswrapper[4881]: I1211 08:46:53.026649 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30f00215-2531-44c8-80d6-7f3be540e71b" path="/var/lib/kubelet/pods/30f00215-2531-44c8-80d6-7f3be540e71b/volumes"
Dec 11 08:46:53 crc kubenswrapper[4881]: I1211 08:46:53.027382 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e6b6074-f624-4016-beb9-a926af51e986" path="/var/lib/kubelet/pods/5e6b6074-f624-4016-beb9-a926af51e986/volumes"
Dec 11 08:46:53 crc kubenswrapper[4881]: I1211 08:46:53.028137 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3b36526-9590-49d0-9b9a-0b86736538bb" path="/var/lib/kubelet/pods/f3b36526-9590-49d0-9b9a-0b86736538bb/volumes"
Dec 11 08:46:57 crc kubenswrapper[4881]: I1211 08:46:57.041845 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-8mgxc"]
Dec 11 08:46:57 crc kubenswrapper[4881]: I1211 08:46:57.060810 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-8mgxc"]
Dec 11 08:46:59 crc kubenswrapper[4881]: I1211 08:46:59.020082 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cae2b44c-241b-44ec-b9c5-1cbdddf26008" path="/var/lib/kubelet/pods/cae2b44c-241b-44ec-b9c5-1cbdddf26008/volumes"
Dec 11 08:47:02 crc kubenswrapper[4881]: I1211 08:47:02.005548 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9"
Dec 11 08:47:02 crc kubenswrapper[4881]: E1211 08:47:02.007786 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:47:06 crc kubenswrapper[4881]: I1211 08:47:06.707742 4881 scope.go:117] "RemoveContainer" containerID="134f45a5d8d70f6de6ef94067aa29106fdd31590a99d05a5028dabde451fac93"
Dec 11 08:47:06 crc kubenswrapper[4881]: I1211 08:47:06.736096 4881 scope.go:117] "RemoveContainer" containerID="16efdf8e4070fcffa5289512b3f3b3d42643ad9087c9ee2d8117b428fe2aa04f"
Dec 11 08:47:06 crc kubenswrapper[4881]: I1211 08:47:06.814533 4881 scope.go:117] "RemoveContainer" containerID="a65c0f2809c22b9782bc496952fd63040f08164a35d37c3c1a3f88b56392d6b0"
Dec 11 08:47:06 crc kubenswrapper[4881]: I1211 08:47:06.838109 4881 scope.go:117] "RemoveContainer" containerID="2a3a1064d42f444ae15ac25f5dbf69b384a98da7dd40d5787d27be2c49036beb"
Dec 11 08:47:06 crc kubenswrapper[4881]: I1211 08:47:06.896124 4881 scope.go:117] "RemoveContainer" containerID="dbe9533fef09f890147f548b4721e46534fc44694f09bb4ac1991822c0e746e9"
Dec 11 08:47:06 crc kubenswrapper[4881]: I1211 08:47:06.930914 4881 scope.go:117] "RemoveContainer" containerID="ad72b995d12f058193d8a2117a39ff0cde2f61b432a29b220d444b2bebeeec8d"
Dec 11 08:47:06 crc kubenswrapper[4881]: I1211 08:47:06.978798 4881 scope.go:117] "RemoveContainer" containerID="b5ff8cc8738894863ae31d50bce383dc0ccfab6019c4feec2a216b9dad415a4c"
Dec 11 08:47:07 crc kubenswrapper[4881]: I1211 08:47:07.034990 4881 scope.go:117] "RemoveContainer" containerID="eee8c8fc77d74034183d1d910210913c2c8ccb0520b46f7e04968231b146b925"
Dec 11 08:47:07 crc kubenswrapper[4881]: I1211 08:47:07.057803 4881 scope.go:117] "RemoveContainer" containerID="df00438a9f6b8c3f78ff888c7863b36178ddaeee4c61b4bcecf16d5bf631cb15"
Dec 11 08:47:07 crc kubenswrapper[4881]: I1211 08:47:07.124556 4881 scope.go:117] "RemoveContainer" containerID="85965898509bda4c29184c354981309c5fb29a2ec7fe7fc941f70a6101d2fb54"
Dec 11 08:47:07 crc kubenswrapper[4881]: I1211 08:47:07.147611 4881 scope.go:117] "RemoveContainer" containerID="ff5228f418e12b75f5f60d84275c714f444f0a72f409069b532d1e643bfd3198"
Dec 11 08:47:07 crc kubenswrapper[4881]: I1211 08:47:07.166136 4881 scope.go:117] "RemoveContainer" containerID="526b94cabc1ab5f42375c717f3f78a7d65fcb4c9648f3d8d19d27530ff115210"
Dec 11 08:47:07 crc kubenswrapper[4881]: I1211 08:47:07.184310 4881 scope.go:117] "RemoveContainer" containerID="182269b56b596c919a0f1fabff20d956dfda7c8f13eb2ead3394c9420f9c24f4"
Dec 11 08:47:07 crc kubenswrapper[4881]: I1211 08:47:07.205218 4881 scope.go:117] "RemoveContainer" containerID="dce9f57d92ae5197a26df3a8ef60dbb7bcef7b674c984fbf3c2d26b3a31e6a2f"
Dec 11 08:47:07 crc kubenswrapper[4881]: I1211 08:47:07.226715 4881 scope.go:117] "RemoveContainer" containerID="481a4016e07783be6ea1d0c3f9de05be655d98b6eccea5c06b271668fd81476f"
Dec 11 08:47:07 crc kubenswrapper[4881]: I1211 08:47:07.249610 4881 scope.go:117] "RemoveContainer" containerID="a6b745bf3fd25c186456f376febb3fbfa8d631d8c3fd90b862f068dce33b7e57"
Dec 11 08:47:07 crc kubenswrapper[4881]: I1211 08:47:07.271120 4881 scope.go:117] "RemoveContainer" containerID="aff65a6ce6d13d872dc7208fe947c7953af82bd5d3ee0e0bf4ee364cd3ee0f0b"
Dec 11 08:47:11 crc kubenswrapper[4881]: I1211 08:47:11.032821 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-s75l2"]
Dec 11 08:47:11 crc kubenswrapper[4881]: I1211 08:47:11.045996 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-s75l2"]
Dec 11 08:47:13 crc kubenswrapper[4881]: I1211 08:47:13.022143 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f4a6ba9-97eb-425b-b2c1-54245c81a6df" path="/var/lib/kubelet/pods/3f4a6ba9-97eb-425b-b2c1-54245c81a6df/volumes"
Dec 11 08:47:15 crc kubenswrapper[4881]: I1211 08:47:15.036527 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-dd28-account-create-v5xvr"]
Dec 11 08:47:15 crc kubenswrapper[4881]: I1211 08:47:15.052594 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-dd28-account-create-v5xvr"]
Dec 11 08:47:15 crc kubenswrapper[4881]: I1211 08:47:15.065899 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-1cc4-account-create-mq9q7"]
Dec 11 08:47:15 crc kubenswrapper[4881]: I1211 08:47:15.084294 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-95cf-account-create-9q62w"]
Dec 11 08:47:15 crc kubenswrapper[4881]: I1211 08:47:15.095050 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-c7b6-account-create-sbjdl"]
Dec 11 08:47:15 crc kubenswrapper[4881]: I1211 08:47:15.106116 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-95cf-account-create-9q62w"]
Dec 11 08:47:15 crc kubenswrapper[4881]: I1211 08:47:15.116182 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-1cc4-account-create-mq9q7"]
Dec 11 08:47:15 crc kubenswrapper[4881]: I1211 08:47:15.126515 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-c7b6-account-create-sbjdl"]
Dec 11 08:47:15 crc kubenswrapper[4881]: I1211 08:47:15.137502 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-6956-account-create-xhfnq"]
Dec 11 08:47:15 crc kubenswrapper[4881]: I1211 08:47:15.147857 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-6956-account-create-xhfnq"]
Dec 11 08:47:17 crc kubenswrapper[4881]: I1211 08:47:17.005153 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9"
Dec 11 08:47:17 crc kubenswrapper[4881]: E1211 08:47:17.006025 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:47:17 crc kubenswrapper[4881]: I1211 08:47:17.016548 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f3dbda2-e652-483b-9ce4-d3bf05516f7b" path="/var/lib/kubelet/pods/0f3dbda2-e652-483b-9ce4-d3bf05516f7b/volumes"
Dec 11 08:47:17 crc kubenswrapper[4881]: I1211 08:47:17.017315 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11fc6ffa-3c79-4409-b646-1e658f40120e" path="/var/lib/kubelet/pods/11fc6ffa-3c79-4409-b646-1e658f40120e/volumes"
Dec 11 08:47:17 crc kubenswrapper[4881]: I1211 08:47:17.018135 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fbc6a44-ebbf-4a77-bad1-53d78b09d292" path="/var/lib/kubelet/pods/2fbc6a44-ebbf-4a77-bad1-53d78b09d292/volumes"
Dec 11 08:47:17 crc kubenswrapper[4881]: I1211 08:47:17.018957 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33f9ffca-ca21-41c8-b52e-5a5c7c786f4d" path="/var/lib/kubelet/pods/33f9ffca-ca21-41c8-b52e-5a5c7c786f4d/volumes"
Dec 11 08:47:17 crc kubenswrapper[4881]: I1211 08:47:17.020284 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be7726a5-588c-4057-823d-6a6c51348f2c" path="/var/lib/kubelet/pods/be7726a5-588c-4057-823d-6a6c51348f2c/volumes"
Dec 11 08:47:30 crc kubenswrapper[4881]: I1211 08:47:30.063456 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-lx525"]
Dec 11 08:47:30 crc kubenswrapper[4881]: I1211 08:47:30.079225 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-lx525"]
Dec 11 08:47:31 crc kubenswrapper[4881]: I1211 08:47:31.005924 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9"
Dec 11 08:47:31 crc kubenswrapper[4881]: E1211 08:47:31.006574 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:47:31 crc kubenswrapper[4881]: I1211 08:47:31.019580 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec" path="/var/lib/kubelet/pods/e87c3b7f-ebb4-4f97-bf3b-df5869f3c1ec/volumes"
Dec 11 08:47:40 crc kubenswrapper[4881]: I1211 08:47:40.030686 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-cb9l4"]
Dec 11 08:47:40 crc kubenswrapper[4881]: I1211 08:47:40.041597 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-cb9l4"]
Dec 11 08:47:41 crc kubenswrapper[4881]: I1211 08:47:41.024045 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4406be5c-0e4c-40ff-ac0f-4da87b36b145" path="/var/lib/kubelet/pods/4406be5c-0e4c-40ff-ac0f-4da87b36b145/volumes"
Dec 11 08:47:43 crc kubenswrapper[4881]: I1211 08:47:43.012023 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9"
Dec 11 08:47:43 crc kubenswrapper[4881]: E1211 08:47:43.012560 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:47:52 crc kubenswrapper[4881]: I1211 08:47:52.042964 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-p9qkf"]
Dec 11 08:47:52 crc kubenswrapper[4881]: I1211 08:47:52.061352 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-p9qkf"]
Dec 11 08:47:53 crc kubenswrapper[4881]: I1211 08:47:53.026895 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce" path="/var/lib/kubelet/pods/5e8aab09-6d6f-4c67-965c-38b8dd3bb7ce/volumes"
Dec 11 08:47:54 crc kubenswrapper[4881]: I1211 08:47:54.032983 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-rhqlf"]
Dec 11 08:47:54 crc kubenswrapper[4881]: I1211 08:47:54.044408 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-rhqlf"]
Dec 11 08:47:55 crc kubenswrapper[4881]: I1211 08:47:55.019548 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c45356cd-f35d-41fa-98d3-6697e7a8100a" path="/var/lib/kubelet/pods/c45356cd-f35d-41fa-98d3-6697e7a8100a/volumes"
Dec 11 08:47:56 crc kubenswrapper[4881]: I1211 08:47:56.005602 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9"
Dec 11 08:47:56 crc kubenswrapper[4881]: E1211 08:47:56.006893 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:48:07 crc kubenswrapper[4881]: I1211 08:48:07.033525 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-vmjdf"]
Dec 11 08:48:07 crc kubenswrapper[4881]: I1211 08:48:07.046364 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-vmjdf"]
Dec 11 08:48:07 crc kubenswrapper[4881]: I1211 08:48:07.591676 4881 scope.go:117] "RemoveContainer" containerID="52b2419c29189f05c8ec1d616c9af2d71612b70012997c4e04970145d2b46d7d"
Dec 11 08:48:07 crc kubenswrapper[4881]: I1211 08:48:07.626620 4881 scope.go:117] "RemoveContainer" containerID="4e55705f98615e6841e69340d70998afd92096e009f293d65b120d6e3bfdf3f3"
Dec 11 08:48:07 crc kubenswrapper[4881]: I1211 08:48:07.660507 4881 scope.go:117] "RemoveContainer" containerID="d01e62e48f86201056e5b28c05d098fc51301f3c281251be1c7d7de1e4c7cdd6"
Dec 11 08:48:07 crc kubenswrapper[4881]: I1211 08:48:07.725864 4881 scope.go:117] "RemoveContainer" containerID="1748598d01f3fc7123a05fdfb8c58dbc2731be3bb0dc3a1dff09fd1c0bf642db"
Dec 11 08:48:07 crc kubenswrapper[4881]: I1211 08:48:07.764763 4881 scope.go:117] "RemoveContainer" containerID="12960751b4f82cf137ad111c7ced056d1e795a77f7be2976a44523a9c048a25c"
Dec 11 08:48:07 crc kubenswrapper[4881]: I1211 08:48:07.813223 4881 scope.go:117] "RemoveContainer" containerID="5dff56a1b1956c2b7a9b0e6d40e74ee9c4053f7af8fb5ec838800e0d5bbf7456"
Dec 11 08:48:07 crc kubenswrapper[4881]: I1211 08:48:07.851097 4881 scope.go:117] "RemoveContainer" containerID="f75e8a784092df0565ac791cab535d3d1d098203dcd0d357752cff3750eabb5d"
Dec 11 08:48:07 crc kubenswrapper[4881]: I1211 08:48:07.913292 4881 scope.go:117] "RemoveContainer" containerID="25449211d300d61d95cde25b949606eb54f5698491343d8ec02f53aadc012933"
Dec 11 08:48:07 crc kubenswrapper[4881]: I1211 08:48:07.973595 4881 scope.go:117] "RemoveContainer" containerID="3845b4845598a018e7d54d3a688631756bdb219853a748ec1617f3155be66256"
Dec 11 08:48:08 crc kubenswrapper[4881]: I1211 08:48:08.000775 4881 scope.go:117] "RemoveContainer" containerID="1c0b7f76ecb7ef9f68ee6fa630ef01d8245a1bae5c7314a1f03f7d71cb50e613"
Dec 11 08:48:08 crc kubenswrapper[4881]: I1211 08:48:08.053656 4881 scope.go:117] "RemoveContainer" containerID="8a6c34b4eb13c9b973c44a7403af19f6140199990c444843f4f0330be7b4fc30"
Dec 11 08:48:08 crc kubenswrapper[4881]: I1211 08:48:08.087701 4881 scope.go:117] "RemoveContainer" containerID="f2fda50fe0522e806ea7b78ac99013d0af1df7ba3435818f78881969a91abf57"
Dec 11 08:48:08 crc kubenswrapper[4881]: I1211 08:48:08.119572 4881 scope.go:117] "RemoveContainer" containerID="d7f250f05ad02b6d4c03f2f429d3e2ae61a035770d5d337eef1e949528c52c73"
Dec 11 08:48:08 crc kubenswrapper[4881]: I1211 08:48:08.141795 4881 scope.go:117] "RemoveContainer" containerID="649bf814d8589ce30cf26c1a32e3bd9df9a66a3076ea58aa465831dfa51b1cc2"
Dec 11 08:48:08 crc kubenswrapper[4881]: I1211 08:48:08.164651 4881 scope.go:117] "RemoveContainer" containerID="16b752fc75edd3f6fad9ee6e5c2ea00e27a43acb7a9a7a57685c4e2f9cba1ad1"
Dec 11 08:48:08 crc kubenswrapper[4881]: I1211 08:48:08.194824 4881 scope.go:117] "RemoveContainer" containerID="ea4a69f217c879e0f90e6d0019162ddd7630229a61961e2190e17070080738fa"
Dec 11 08:48:08 crc kubenswrapper[4881]: I1211 08:48:08.215958 4881 scope.go:117] "RemoveContainer" containerID="848fe4d3938e93638f3177a44497a800500651736f73ccaf195458f2ef728205"
Dec 11 08:48:09 crc kubenswrapper[4881]: I1211 08:48:09.021994 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="422faa6a-f2ed-4015-87cd-7878bac246e4" path="/var/lib/kubelet/pods/422faa6a-f2ed-4015-87cd-7878bac246e4/volumes"
Dec 11 08:48:10 crc kubenswrapper[4881]: I1211 08:48:10.005093 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9"
Dec 11 08:48:10 crc kubenswrapper[4881]: E1211 08:48:10.005772 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:48:23 crc kubenswrapper[4881]: I1211 08:48:23.014800 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9"
Dec 11 08:48:23 crc kubenswrapper[4881]: E1211 08:48:23.015650 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:48:29 crc kubenswrapper[4881]: I1211 08:48:29.051298 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-bcjrt"]
Dec 11 08:48:29 crc kubenswrapper[4881]: I1211 08:48:29.062377 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-bcjrt"]
Dec 11 08:48:31 crc kubenswrapper[4881]: I1211 08:48:31.018901 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc1ec075-9e84-4cfd-9f5a-b29d5af0d610" path="/var/lib/kubelet/pods/cc1ec075-9e84-4cfd-9f5a-b29d5af0d610/volumes"
Dec 11 08:48:34 crc kubenswrapper[4881]: I1211 08:48:34.005469 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9"
Dec 11 08:48:34 crc kubenswrapper[4881]: I1211 08:48:34.779221 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"73cf77c8e58dfead623c00357f3020e90cfa5d92429139db8dcd67259d7f3aee"}
Dec 11 08:48:40 crc kubenswrapper[4881]: I1211 08:48:40.866674 4881 generic.go:334] "Generic (PLEG): container finished" podID="e741b94a-ed71-4819-ba06-943aa25aaaf8" containerID="db530b92405f9baf5f6cf5e7132a65bf418aa76ba1b57d73abb4c67e1dc385c6" exitCode=0
Dec 11 08:48:40 crc kubenswrapper[4881]: I1211 08:48:40.867493 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr" event={"ID":"e741b94a-ed71-4819-ba06-943aa25aaaf8","Type":"ContainerDied","Data":"db530b92405f9baf5f6cf5e7132a65bf418aa76ba1b57d73abb4c67e1dc385c6"}
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.406069 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.451295 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4v4dc\" (UniqueName: \"kubernetes.io/projected/e741b94a-ed71-4819-ba06-943aa25aaaf8-kube-api-access-4v4dc\") pod \"e741b94a-ed71-4819-ba06-943aa25aaaf8\" (UID: \"e741b94a-ed71-4819-ba06-943aa25aaaf8\") "
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.451453 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-ssh-key\") pod \"e741b94a-ed71-4819-ba06-943aa25aaaf8\" (UID: \"e741b94a-ed71-4819-ba06-943aa25aaaf8\") "
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.451559 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-bootstrap-combined-ca-bundle\") pod \"e741b94a-ed71-4819-ba06-943aa25aaaf8\" (UID: \"e741b94a-ed71-4819-ba06-943aa25aaaf8\") "
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.451633 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-inventory\") pod \"e741b94a-ed71-4819-ba06-943aa25aaaf8\" (UID: \"e741b94a-ed71-4819-ba06-943aa25aaaf8\") "
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.458730 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e741b94a-ed71-4819-ba06-943aa25aaaf8-kube-api-access-4v4dc" (OuterVolumeSpecName: "kube-api-access-4v4dc") pod "e741b94a-ed71-4819-ba06-943aa25aaaf8" (UID: "e741b94a-ed71-4819-ba06-943aa25aaaf8"). InnerVolumeSpecName "kube-api-access-4v4dc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.461590 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "e741b94a-ed71-4819-ba06-943aa25aaaf8" (UID: "e741b94a-ed71-4819-ba06-943aa25aaaf8"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.494710 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e741b94a-ed71-4819-ba06-943aa25aaaf8" (UID: "e741b94a-ed71-4819-ba06-943aa25aaaf8"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.504260 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-inventory" (OuterVolumeSpecName: "inventory") pod "e741b94a-ed71-4819-ba06-943aa25aaaf8" (UID: "e741b94a-ed71-4819-ba06-943aa25aaaf8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.554424 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4v4dc\" (UniqueName: \"kubernetes.io/projected/e741b94a-ed71-4819-ba06-943aa25aaaf8-kube-api-access-4v4dc\") on node \"crc\" DevicePath \"\""
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.554473 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.554489 4881 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.554502 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e741b94a-ed71-4819-ba06-943aa25aaaf8-inventory\") on node \"crc\" DevicePath \"\""
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.667239 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-r9kd8"]
Dec 11 08:48:42 crc kubenswrapper[4881]: E1211 08:48:42.667845 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e741b94a-ed71-4819-ba06-943aa25aaaf8" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.667870 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e741b94a-ed71-4819-ba06-943aa25aaaf8" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.668108 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="e741b94a-ed71-4819-ba06-943aa25aaaf8" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.669869 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r9kd8"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.688535 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r9kd8"]
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.757356 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4r58b\" (UniqueName: \"kubernetes.io/projected/29f4a378-576b-44cc-bff9-f102544910ad-kube-api-access-4r58b\") pod \"community-operators-r9kd8\" (UID: \"29f4a378-576b-44cc-bff9-f102544910ad\") " pod="openshift-marketplace/community-operators-r9kd8"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.757441 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29f4a378-576b-44cc-bff9-f102544910ad-catalog-content\") pod \"community-operators-r9kd8\" (UID: \"29f4a378-576b-44cc-bff9-f102544910ad\") " pod="openshift-marketplace/community-operators-r9kd8"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.757656 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29f4a378-576b-44cc-bff9-f102544910ad-utilities\") pod \"community-operators-r9kd8\" (UID: \"29f4a378-576b-44cc-bff9-f102544910ad\") " pod="openshift-marketplace/community-operators-r9kd8"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.860010 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4r58b\" (UniqueName: \"kubernetes.io/projected/29f4a378-576b-44cc-bff9-f102544910ad-kube-api-access-4r58b\") pod \"community-operators-r9kd8\" (UID: \"29f4a378-576b-44cc-bff9-f102544910ad\") " pod="openshift-marketplace/community-operators-r9kd8"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.860171 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29f4a378-576b-44cc-bff9-f102544910ad-catalog-content\") pod \"community-operators-r9kd8\" (UID: \"29f4a378-576b-44cc-bff9-f102544910ad\") " pod="openshift-marketplace/community-operators-r9kd8"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.860243 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29f4a378-576b-44cc-bff9-f102544910ad-utilities\") pod \"community-operators-r9kd8\" (UID: \"29f4a378-576b-44cc-bff9-f102544910ad\") " pod="openshift-marketplace/community-operators-r9kd8"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.860726 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29f4a378-576b-44cc-bff9-f102544910ad-catalog-content\") pod \"community-operators-r9kd8\" (UID: \"29f4a378-576b-44cc-bff9-f102544910ad\") " pod="openshift-marketplace/community-operators-r9kd8"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.860911 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29f4a378-576b-44cc-bff9-f102544910ad-utilities\") pod \"community-operators-r9kd8\" (UID: \"29f4a378-576b-44cc-bff9-f102544910ad\") " pod="openshift-marketplace/community-operators-r9kd8"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.877961 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4r58b\" (UniqueName: \"kubernetes.io/projected/29f4a378-576b-44cc-bff9-f102544910ad-kube-api-access-4r58b\") pod \"community-operators-r9kd8\" (UID: \"29f4a378-576b-44cc-bff9-f102544910ad\") " pod="openshift-marketplace/community-operators-r9kd8"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.892564 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr" event={"ID":"e741b94a-ed71-4819-ba06-943aa25aaaf8","Type":"ContainerDied","Data":"d192cb2536a3576b69b8e7044f9506a1d0cf2189697f817fb37f0f3891698dbe"}
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.892607 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d192cb2536a3576b69b8e7044f9506a1d0cf2189697f817fb37f0f3891698dbe"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.892643 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.989808 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r9kd8"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.992940 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn"]
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.994929 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.997133 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.997168 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.997250 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 11 08:48:42 crc kubenswrapper[4881]: I1211 08:48:42.997375 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72"
Dec 11 08:48:43 crc kubenswrapper[4881]: I1211 08:48:43.024410 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn"]
Dec 11 08:48:43 crc kubenswrapper[4881]: I1211 08:48:43.067409 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/532ceac4-3c2d-4d4a-900f-498fa41192b1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn\" (UID: \"532ceac4-3c2d-4d4a-900f-498fa41192b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn"
Dec 11 08:48:43 crc kubenswrapper[4881]: I1211 08:48:43.068000 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/532ceac4-3c2d-4d4a-900f-498fa41192b1-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn\" (UID: \"532ceac4-3c2d-4d4a-900f-498fa41192b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn"
Dec 11 08:48:43 crc kubenswrapper[4881]: I1211 08:48:43.068187 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpd9t\" (UniqueName: \"kubernetes.io/projected/532ceac4-3c2d-4d4a-900f-498fa41192b1-kube-api-access-qpd9t\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn\" (UID: \"532ceac4-3c2d-4d4a-900f-498fa41192b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn"
Dec 11 08:48:43 crc kubenswrapper[4881]: I1211 08:48:43.170510 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/532ceac4-3c2d-4d4a-900f-498fa41192b1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn\" (UID: \"532ceac4-3c2d-4d4a-900f-498fa41192b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn"
Dec 11 08:48:43 crc kubenswrapper[4881]: I1211 08:48:43.170585 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/532ceac4-3c2d-4d4a-900f-498fa41192b1-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn\" (UID: \"532ceac4-3c2d-4d4a-900f-498fa41192b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn"
Dec 11 08:48:43 crc kubenswrapper[4881]: I1211 08:48:43.170654 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpd9t\" (UniqueName: \"kubernetes.io/projected/532ceac4-3c2d-4d4a-900f-498fa41192b1-kube-api-access-qpd9t\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn\" (UID: \"532ceac4-3c2d-4d4a-900f-498fa41192b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn"
Dec 11 08:48:43 crc kubenswrapper[4881]: I1211 08:48:43.174984 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/532ceac4-3c2d-4d4a-900f-498fa41192b1-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn\" (UID: \"532ceac4-3c2d-4d4a-900f-498fa41192b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn"
Dec 11 08:48:43 crc kubenswrapper[4881]: I1211 08:48:43.175458 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/532ceac4-3c2d-4d4a-900f-498fa41192b1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn\" (UID: \"532ceac4-3c2d-4d4a-900f-498fa41192b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn"
Dec 11 08:48:43 crc kubenswrapper[4881]: I1211 08:48:43.187303 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpd9t\" (UniqueName: \"kubernetes.io/projected/532ceac4-3c2d-4d4a-900f-498fa41192b1-kube-api-access-qpd9t\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn\" (UID: \"532ceac4-3c2d-4d4a-900f-498fa41192b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn"
Dec 11 08:48:43 crc kubenswrapper[4881]: I1211 08:48:43.339938 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn"
Dec 11 08:48:44 crc kubenswrapper[4881]: I1211 08:48:44.968968 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r9kd8"]
Dec 11 08:48:45 crc kubenswrapper[4881]: I1211 08:48:45.053406 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn"]
Dec 11 08:48:45 crc kubenswrapper[4881]: W1211 08:48:45.055130 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod532ceac4_3c2d_4d4a_900f_498fa41192b1.slice/crio-dc95ae861e0b74de69b4ff705300c0e2c8120aedc36fff1e9e7d3a535216d315 WatchSource:0}: Error finding container dc95ae861e0b74de69b4ff705300c0e2c8120aedc36fff1e9e7d3a535216d315: Status 404 returned error can't find the container with id dc95ae861e0b74de69b4ff705300c0e2c8120aedc36fff1e9e7d3a535216d315
Dec 11 08:48:45 crc kubenswrapper[4881]: I1211 08:48:45.057326 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 11 08:48:45 crc kubenswrapper[4881]: I1211 08:48:45.923262 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn" event={"ID":"532ceac4-3c2d-4d4a-900f-498fa41192b1","Type":"ContainerStarted","Data":"dc95ae861e0b74de69b4ff705300c0e2c8120aedc36fff1e9e7d3a535216d315"}
Dec 11 08:48:45 crc kubenswrapper[4881]: I1211 08:48:45.925392 4881 generic.go:334] "Generic (PLEG): container finished" podID="29f4a378-576b-44cc-bff9-f102544910ad" containerID="9a12fbf06f4f8d7a4a106694bc8dee28bb0f172b688cb81b25614cf8b1af2590" exitCode=0
Dec 11 08:48:45 crc kubenswrapper[4881]: I1211 08:48:45.925434 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r9kd8" event={"ID":"29f4a378-576b-44cc-bff9-f102544910ad","Type":"ContainerDied","Data":"9a12fbf06f4f8d7a4a106694bc8dee28bb0f172b688cb81b25614cf8b1af2590"}
Dec 11 08:48:45 crc kubenswrapper[4881]: I1211 08:48:45.925462 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r9kd8" event={"ID":"29f4a378-576b-44cc-bff9-f102544910ad","Type":"ContainerStarted","Data":"fd684ce005489c304bc38a31a8aba01d876e4331c5dc2749b8f44404b9195047"}
Dec 11 08:48:46 crc kubenswrapper[4881]: I1211 08:48:46.941897 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn" event={"ID":"532ceac4-3c2d-4d4a-900f-498fa41192b1","Type":"ContainerStarted","Data":"2c7ec50392d84633596ded7c1a9561f57a292ed3f1dd83e9f037deaddbedf68e"}
Dec 11 08:48:46 crc kubenswrapper[4881]: I1211 08:48:46.946421 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r9kd8" event={"ID":"29f4a378-576b-44cc-bff9-f102544910ad","Type":"ContainerStarted","Data":"5e2b1def7656ba45e5fbd718182415e8f24edece0ce0721703fb4a27c806dabe"}
Dec 11 08:48:46 crc kubenswrapper[4881]: I1211 08:48:46.963966 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn" podStartSLOduration=4.178530034 podStartE2EDuration="4.963947346s" podCreationTimestamp="2025-12-11 08:48:42 +0000 UTC" firstStartedPulling="2025-12-11 08:48:45.05712935 +0000 UTC m=+1973.434498047" lastFinishedPulling="2025-12-11 08:48:45.842546662 +0000 UTC m=+1974.219915359" observedRunningTime="2025-12-11 08:48:46.959166908 +0000 UTC m=+1975.336535605" watchObservedRunningTime="2025-12-11 08:48:46.963947346 +0000 UTC m=+1975.341316043"
Dec 11 08:48:48 crc kubenswrapper[4881]: I1211 08:48:48.968189 4881 generic.go:334] "Generic (PLEG): container finished" podID="29f4a378-576b-44cc-bff9-f102544910ad" containerID="5e2b1def7656ba45e5fbd718182415e8f24edece0ce0721703fb4a27c806dabe" exitCode=0
Dec 11 08:48:48 crc kubenswrapper[4881]: I1211 08:48:48.968768 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r9kd8" event={"ID":"29f4a378-576b-44cc-bff9-f102544910ad","Type":"ContainerDied","Data":"5e2b1def7656ba45e5fbd718182415e8f24edece0ce0721703fb4a27c806dabe"}
Dec 11 08:48:50 crc kubenswrapper[4881]: I1211 08:48:50.999080 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r9kd8" event={"ID":"29f4a378-576b-44cc-bff9-f102544910ad","Type":"ContainerStarted","Data":"49321f6fa942616c7826b28ddc7506169e67ac5b260f865b0816db27d323baf1"}
Dec 11 08:48:51 crc kubenswrapper[4881]: I1211 08:48:51.031769 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-r9kd8" podStartSLOduration=4.669943163 podStartE2EDuration="9.031748061s" podCreationTimestamp="2025-12-11 08:48:42 +0000 UTC" firstStartedPulling="2025-12-11 08:48:45.927368883 +0000 UTC m=+1974.304737580" lastFinishedPulling="2025-12-11 08:48:50.289173781 +0000 UTC m=+1978.666542478" observedRunningTime="2025-12-11 08:48:51.023962039 +0000 UTC m=+1979.401330736" watchObservedRunningTime="2025-12-11 08:48:51.031748061 +0000 UTC m=+1979.409116758"
Dec 11 08:48:52 crc kubenswrapper[4881]: I1211 08:48:52.989906 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-r9kd8"
Dec 11 08:48:52 crc kubenswrapper[4881]: I1211 08:48:52.991191 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-r9kd8"
Dec 11 08:48:54 crc kubenswrapper[4881]: I1211 08:48:54.055908 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-r9kd8" podUID="29f4a378-576b-44cc-bff9-f102544910ad" containerName="registry-server" probeResult="failure" output=<
Dec 11 08:48:54 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s
Dec 11 08:48:54 crc kubenswrapper[4881]: >
Dec 11 08:49:01 crc kubenswrapper[4881]: I1211 08:49:01.062553 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-bjwpq"]
Dec 11 08:49:01 crc kubenswrapper[4881]: I1211 08:49:01.079660 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-gs9fw"]
Dec 11 08:49:01 crc kubenswrapper[4881]: I1211 08:49:01.090453 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-bjwpq"]
Dec 11 08:49:01 crc kubenswrapper[4881]: I1211 08:49:01.101872 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-gs9fw"]
Dec 11 08:49:02 crc kubenswrapper[4881]: I1211 08:49:02.027347 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-lhqk7"]
Dec 11 08:49:02 crc kubenswrapper[4881]: I1211 08:49:02.037945 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-lhqk7"]
Dec 11 08:49:03 crc kubenswrapper[4881]: I1211 08:49:03.022145 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0152813f-e688-49b2-88d2-afba5096bd0e" path="/var/lib/kubelet/pods/0152813f-e688-49b2-88d2-afba5096bd0e/volumes"
Dec 11 08:49:03 crc kubenswrapper[4881]: I1211 08:49:03.023262 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="556e7646-0cc0-4686-875f-6738267e467e" path="/var/lib/kubelet/pods/556e7646-0cc0-4686-875f-6738267e467e/volumes"
Dec 11 08:49:03 crc kubenswrapper[4881]: I1211 08:49:03.023979 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a116558-2db5-4fe1-9f64-888d7cd93f57" path="/var/lib/kubelet/pods/7a116558-2db5-4fe1-9f64-888d7cd93f57/volumes"
Dec 11 08:49:03 crc kubenswrapper[4881]: I1211 08:49:03.040425 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-r9kd8"
Dec 11 08:49:03 crc kubenswrapper[4881]: I1211 08:49:03.106590 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-r9kd8"
Dec 11 08:49:03 crc kubenswrapper[4881]: I1211 08:49:03.279324 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-r9kd8"]
Dec 11 08:49:04 crc kubenswrapper[4881]: I1211 08:49:04.150984 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-r9kd8" podUID="29f4a378-576b-44cc-bff9-f102544910ad" containerName="registry-server" containerID="cri-o://49321f6fa942616c7826b28ddc7506169e67ac5b260f865b0816db27d323baf1" gracePeriod=2
Dec 11 08:49:05 crc kubenswrapper[4881]: I1211 08:49:05.171730 4881 generic.go:334] "Generic (PLEG): container finished" podID="29f4a378-576b-44cc-bff9-f102544910ad" containerID="49321f6fa942616c7826b28ddc7506169e67ac5b260f865b0816db27d323baf1" exitCode=0
Dec 11 08:49:05 crc kubenswrapper[4881]: I1211 08:49:05.171808 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r9kd8" event={"ID":"29f4a378-576b-44cc-bff9-f102544910ad","Type":"ContainerDied","Data":"49321f6fa942616c7826b28ddc7506169e67ac5b260f865b0816db27d323baf1"}
Dec 11 08:49:05 crc kubenswrapper[4881]: I1211 08:49:05.754795 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r9kd8"
Dec 11 08:49:05 crc kubenswrapper[4881]: I1211 08:49:05.814708 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4r58b\" (UniqueName: \"kubernetes.io/projected/29f4a378-576b-44cc-bff9-f102544910ad-kube-api-access-4r58b\") pod \"29f4a378-576b-44cc-bff9-f102544910ad\" (UID: \"29f4a378-576b-44cc-bff9-f102544910ad\") "
Dec 11 08:49:05 crc kubenswrapper[4881]: I1211 08:49:05.814995 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29f4a378-576b-44cc-bff9-f102544910ad-catalog-content\") pod \"29f4a378-576b-44cc-bff9-f102544910ad\" (UID: \"29f4a378-576b-44cc-bff9-f102544910ad\") "
Dec 11 08:49:05 crc kubenswrapper[4881]: I1211 08:49:05.815047 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29f4a378-576b-44cc-bff9-f102544910ad-utilities\") pod \"29f4a378-576b-44cc-bff9-f102544910ad\" (UID: \"29f4a378-576b-44cc-bff9-f102544910ad\") "
Dec 11 08:49:05 crc kubenswrapper[4881]: I1211 08:49:05.816174 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29f4a378-576b-44cc-bff9-f102544910ad-utilities" (OuterVolumeSpecName: "utilities") pod "29f4a378-576b-44cc-bff9-f102544910ad" (UID: "29f4a378-576b-44cc-bff9-f102544910ad"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 08:49:05 crc kubenswrapper[4881]: I1211 08:49:05.817095 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29f4a378-576b-44cc-bff9-f102544910ad-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 08:49:05 crc kubenswrapper[4881]: I1211 08:49:05.837733 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29f4a378-576b-44cc-bff9-f102544910ad-kube-api-access-4r58b" (OuterVolumeSpecName: "kube-api-access-4r58b") pod "29f4a378-576b-44cc-bff9-f102544910ad" (UID: "29f4a378-576b-44cc-bff9-f102544910ad"). InnerVolumeSpecName "kube-api-access-4r58b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:49:05 crc kubenswrapper[4881]: I1211 08:49:05.889551 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29f4a378-576b-44cc-bff9-f102544910ad-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "29f4a378-576b-44cc-bff9-f102544910ad" (UID: "29f4a378-576b-44cc-bff9-f102544910ad"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 08:49:05 crc kubenswrapper[4881]: I1211 08:49:05.920111 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29f4a378-576b-44cc-bff9-f102544910ad-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 08:49:05 crc kubenswrapper[4881]: I1211 08:49:05.920152 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4r58b\" (UniqueName: \"kubernetes.io/projected/29f4a378-576b-44cc-bff9-f102544910ad-kube-api-access-4r58b\") on node \"crc\" DevicePath \"\""
Dec 11 08:49:06 crc kubenswrapper[4881]: I1211 08:49:06.191995 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r9kd8" event={"ID":"29f4a378-576b-44cc-bff9-f102544910ad","Type":"ContainerDied","Data":"fd684ce005489c304bc38a31a8aba01d876e4331c5dc2749b8f44404b9195047"}
Dec 11 08:49:06 crc kubenswrapper[4881]: I1211 08:49:06.192047 4881 scope.go:117] "RemoveContainer" containerID="49321f6fa942616c7826b28ddc7506169e67ac5b260f865b0816db27d323baf1"
Dec 11 08:49:06 crc kubenswrapper[4881]: I1211 08:49:06.192046 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r9kd8"
Dec 11 08:49:06 crc kubenswrapper[4881]: I1211 08:49:06.244415 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-r9kd8"]
Dec 11 08:49:06 crc kubenswrapper[4881]: I1211 08:49:06.258284 4881 scope.go:117] "RemoveContainer" containerID="5e2b1def7656ba45e5fbd718182415e8f24edece0ce0721703fb4a27c806dabe"
Dec 11 08:49:06 crc kubenswrapper[4881]: I1211 08:49:06.262301 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-r9kd8"]
Dec 11 08:49:06 crc kubenswrapper[4881]: I1211 08:49:06.288634 4881 scope.go:117] "RemoveContainer" containerID="9a12fbf06f4f8d7a4a106694bc8dee28bb0f172b688cb81b25614cf8b1af2590"
Dec 11 08:49:07 crc kubenswrapper[4881]: I1211 08:49:07.017401 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29f4a378-576b-44cc-bff9-f102544910ad" path="/var/lib/kubelet/pods/29f4a378-576b-44cc-bff9-f102544910ad/volumes"
Dec 11 08:49:08 crc kubenswrapper[4881]: I1211 08:49:08.486244 4881 scope.go:117] "RemoveContainer" containerID="e93ae53a5a85e5ec74f3458cf16b09906e27fc5d031ed76c51db24d593d938a3"
Dec 11 08:49:08 crc kubenswrapper[4881]: I1211 08:49:08.515196 4881 scope.go:117] "RemoveContainer" containerID="45fc249c3889799d7ee4d73940d28ed200dae5bc2594643f31c5bdfb3bc35802"
Dec 11 08:49:08 crc kubenswrapper[4881]: I1211 08:49:08.573263 4881 scope.go:117] "RemoveContainer" containerID="c2b6075393d1d2f9e783c96ac6f361d451182813df29b4c7eee0930a516eb222"
Dec 11 08:49:08 crc kubenswrapper[4881]: I1211 08:49:08.631471 4881 scope.go:117] "RemoveContainer" containerID="0bafc15cd1a4a159ece5b6186664af0b35f78a1f3442fe0b6442a32e1917de5f"
Dec 11 08:49:08 crc kubenswrapper[4881]: I1211 08:49:08.678302 4881 scope.go:117] "RemoveContainer" containerID="47469da148bd551d6a297aec48b47e90f615ba6d1655f095fdb9f1d26bb23515"
Dec 11 08:49:22 crc kubenswrapper[4881]: I1211 08:49:22.042907 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-ebaf-account-create-f5l7l"]
Dec 11 08:49:22 crc kubenswrapper[4881]: I1211 08:49:22.055785 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-2f0d-account-create-tp8rt"]
Dec 11 08:49:22 crc kubenswrapper[4881]: I1211 08:49:22.067679 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-ea12-account-create-lwkvg"]
Dec 11 08:49:22 crc kubenswrapper[4881]: I1211 08:49:22.078525 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-ebaf-account-create-f5l7l"]
Dec 11 08:49:22 crc kubenswrapper[4881]: I1211 08:49:22.088041 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-ea12-account-create-lwkvg"]
Dec 11 08:49:22 crc kubenswrapper[4881]: I1211 08:49:22.100611 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-2f0d-account-create-tp8rt"]
Dec 11 08:49:23 crc kubenswrapper[4881]: I1211 08:49:23.019374 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07a8ad09-4a57-4bec-bba2-682ce3e405b6" path="/var/lib/kubelet/pods/07a8ad09-4a57-4bec-bba2-682ce3e405b6/volumes"
Dec 11 08:49:23 crc kubenswrapper[4881]: I1211 08:49:23.020196 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90e20f17-6c3d-4da8-91d5-6a4fcbb8832d" path="/var/lib/kubelet/pods/90e20f17-6c3d-4da8-91d5-6a4fcbb8832d/volumes"
Dec 11 08:49:23 crc kubenswrapper[4881]: I1211 08:49:23.020850 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9014fc6-5e9d-4535-9961-e9cc2ab2f357" path="/var/lib/kubelet/pods/f9014fc6-5e9d-4535-9961-e9cc2ab2f357/volumes"
Dec 11 08:50:08 crc kubenswrapper[4881]: I1211 08:50:08.864035 4881 scope.go:117] "RemoveContainer" containerID="4ac8978a8e4c4f8a5cfd12cdaf4ae9daf1f35a19aad61efc1a4a52e6202a8e26"
Dec 11 08:50:08 crc kubenswrapper[4881]: I1211 08:50:08.888288 4881 scope.go:117] "RemoveContainer" containerID="dad2be3c2ee493ae66d709cd28efe8b57e81164c9f30af2f89c5213a7a48c548"
Dec 11 08:50:08 crc kubenswrapper[4881]: I1211 08:50:08.913869 4881 scope.go:117] "RemoveContainer" containerID="d1c391fe68303db94bb518644ce9868d9cdae96e140449fce263f67a00d21ef6"
Dec 11 08:50:08 crc kubenswrapper[4881]: I1211 08:50:08.970405 4881 scope.go:117] "RemoveContainer" containerID="409a7b69ec180cfe52d183933482c661d00615b699e81fbd86cc02ea15fde517"
Dec 11 08:50:09 crc kubenswrapper[4881]: I1211 08:50:09.047429 4881 scope.go:117] "RemoveContainer" containerID="7b423d2507e65b7394dd7ac25996fb90ee191ddf5e87efc3a144549a1ecc7d2a"
Dec 11 08:50:09 crc kubenswrapper[4881]: I1211 08:50:09.093163 4881 scope.go:117] "RemoveContainer" containerID="72d1dc2c98e64f523a5745f2c2591b06edcf5d04adae77d22a52c46b8841f0df"
Dec 11 08:50:09 crc kubenswrapper[4881]: I1211 08:50:09.432007 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-5f6c547b6c-rjk9h" podUID="910014af-7b9e-49b8-99e3-b80a15d72faf" containerName="proxy-server" probeResult="failure" output="HTTP probe failed with statuscode: 502"
Dec 11 08:50:17 crc kubenswrapper[4881]: I1211 08:50:17.040277 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-n8lqz"]
Dec 11 08:50:17 crc kubenswrapper[4881]: I1211 08:50:17.055465 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-n8lqz"]
Dec 11 08:50:19 crc kubenswrapper[4881]: I1211 08:50:19.018629 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82b87f02-5631-4cbe-be7a-22cf0e321e23" path="/var/lib/kubelet/pods/82b87f02-5631-4cbe-be7a-22cf0e321e23/volumes"
Dec 11 08:50:39 crc kubenswrapper[4881]: I1211 08:50:39.048299 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-3e52-account-create-82sqk"]
Dec 11 08:50:39 crc kubenswrapper[4881]: I1211 08:50:39.059473 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-3e52-account-create-82sqk"]
Dec 11 08:50:41 crc kubenswrapper[4881]: I1211 08:50:41.022446 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04df1111-8ddd-4102-9ab8-103767f09410" path="/var/lib/kubelet/pods/04df1111-8ddd-4102-9ab8-103767f09410/volumes"
Dec 11 08:50:53 crc kubenswrapper[4881]: I1211 08:50:53.061480 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6jtqx"]
Dec 11 08:50:53 crc kubenswrapper[4881]: I1211 08:50:53.073659 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6jtqx"]
Dec 11 08:50:55 crc kubenswrapper[4881]: I1211 08:50:55.022511 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87e2a6fa-8d80-4bde-9f33-fe0d4f34d933" path="/var/lib/kubelet/pods/87e2a6fa-8d80-4bde-9f33-fe0d4f34d933/volumes"
Dec 11 08:50:59 crc kubenswrapper[4881]: I1211 08:50:59.396927 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 08:50:59 crc kubenswrapper[4881]: I1211 08:50:59.397449 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 08:51:09 crc kubenswrapper[4881]: I1211 08:51:09.236149 4881 scope.go:117] "RemoveContainer" containerID="4aad9c5989f63736bb1737df392cb932ad9795365978ce16a585b9700510a0fa"
Dec 11 08:51:09 crc kubenswrapper[4881]: I1211 08:51:09.264403 4881 scope.go:117] "RemoveContainer" containerID="e34cb666a0b9bfe93c61de10e1acebf64b2f12870292850ba40f91470fbebf50"
Dec 11 08:51:09 crc kubenswrapper[4881]: I1211 08:51:09.386145 4881 scope.go:117] "RemoveContainer" containerID="4f6ff7738d68f7848ce185b77c8dd6b81634f3f43ebca3007990f2d203d86fb2"
Dec 11 08:51:13 crc kubenswrapper[4881]: I1211 08:51:13.457153 4881 generic.go:334] "Generic (PLEG): container finished" podID="532ceac4-3c2d-4d4a-900f-498fa41192b1" containerID="2c7ec50392d84633596ded7c1a9561f57a292ed3f1dd83e9f037deaddbedf68e" exitCode=0
Dec 11 08:51:13 crc kubenswrapper[4881]: I1211 08:51:13.457243 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn" event={"ID":"532ceac4-3c2d-4d4a-900f-498fa41192b1","Type":"ContainerDied","Data":"2c7ec50392d84633596ded7c1a9561f57a292ed3f1dd83e9f037deaddbedf68e"}
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.129598 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.220597 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qpd9t\" (UniqueName: \"kubernetes.io/projected/532ceac4-3c2d-4d4a-900f-498fa41192b1-kube-api-access-qpd9t\") pod \"532ceac4-3c2d-4d4a-900f-498fa41192b1\" (UID: \"532ceac4-3c2d-4d4a-900f-498fa41192b1\") "
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.221128 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/532ceac4-3c2d-4d4a-900f-498fa41192b1-ssh-key\") pod \"532ceac4-3c2d-4d4a-900f-498fa41192b1\" (UID: \"532ceac4-3c2d-4d4a-900f-498fa41192b1\") "
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.221381 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/532ceac4-3c2d-4d4a-900f-498fa41192b1-inventory\") pod \"532ceac4-3c2d-4d4a-900f-498fa41192b1\" (UID: \"532ceac4-3c2d-4d4a-900f-498fa41192b1\") "
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.227544 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/532ceac4-3c2d-4d4a-900f-498fa41192b1-kube-api-access-qpd9t" (OuterVolumeSpecName: "kube-api-access-qpd9t") pod "532ceac4-3c2d-4d4a-900f-498fa41192b1" (UID: "532ceac4-3c2d-4d4a-900f-498fa41192b1"). InnerVolumeSpecName "kube-api-access-qpd9t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.258347 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/532ceac4-3c2d-4d4a-900f-498fa41192b1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "532ceac4-3c2d-4d4a-900f-498fa41192b1" (UID: "532ceac4-3c2d-4d4a-900f-498fa41192b1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.261503 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/532ceac4-3c2d-4d4a-900f-498fa41192b1-inventory" (OuterVolumeSpecName: "inventory") pod "532ceac4-3c2d-4d4a-900f-498fa41192b1" (UID: "532ceac4-3c2d-4d4a-900f-498fa41192b1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.325593 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/532ceac4-3c2d-4d4a-900f-498fa41192b1-inventory\") on node \"crc\" DevicePath \"\""
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.325641 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qpd9t\" (UniqueName: \"kubernetes.io/projected/532ceac4-3c2d-4d4a-900f-498fa41192b1-kube-api-access-qpd9t\") on node \"crc\" DevicePath \"\""
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.325657 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/532ceac4-3c2d-4d4a-900f-498fa41192b1-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.480678 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn" event={"ID":"532ceac4-3c2d-4d4a-900f-498fa41192b1","Type":"ContainerDied","Data":"dc95ae861e0b74de69b4ff705300c0e2c8120aedc36fff1e9e7d3a535216d315"}
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.480730 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dc95ae861e0b74de69b4ff705300c0e2c8120aedc36fff1e9e7d3a535216d315"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.480742 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.570435 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp"]
Dec 11 08:51:15 crc kubenswrapper[4881]: E1211 08:51:15.571156 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29f4a378-576b-44cc-bff9-f102544910ad" containerName="registry-server"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.571177 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="29f4a378-576b-44cc-bff9-f102544910ad" containerName="registry-server"
Dec 11 08:51:15 crc kubenswrapper[4881]: E1211 08:51:15.571200 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29f4a378-576b-44cc-bff9-f102544910ad" containerName="extract-utilities"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.571209 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="29f4a378-576b-44cc-bff9-f102544910ad" containerName="extract-utilities"
Dec 11 08:51:15 crc kubenswrapper[4881]: E1211 08:51:15.571230 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="532ceac4-3c2d-4d4a-900f-498fa41192b1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.571237 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="532ceac4-3c2d-4d4a-900f-498fa41192b1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 11 08:51:15 crc kubenswrapper[4881]: E1211 08:51:15.571275 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29f4a378-576b-44cc-bff9-f102544910ad" containerName="extract-content"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.571282 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="29f4a378-576b-44cc-bff9-f102544910ad" containerName="extract-content"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.571544 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="532ceac4-3c2d-4d4a-900f-498fa41192b1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.571572 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="29f4a378-576b-44cc-bff9-f102544910ad" containerName="registry-server"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.572493 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.576440 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.577249 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.577306 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.577428 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.585696 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp"]
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.637121 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp\" (UID: \"7dd4872c-380b-4dcc-bd46-ad6a624a2d34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.637607 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp\" (UID: \"7dd4872c-380b-4dcc-bd46-ad6a624a2d34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.637921 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdftr\" (UniqueName: \"kubernetes.io/projected/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-kube-api-access-qdftr\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp\" (UID: \"7dd4872c-380b-4dcc-bd46-ad6a624a2d34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.740164 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdftr\" (UniqueName: \"kubernetes.io/projected/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-kube-api-access-qdftr\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp\" (UID: \"7dd4872c-380b-4dcc-bd46-ad6a624a2d34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp"
Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.740323 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName:
\"kubernetes.io/secret/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp\" (UID: \"7dd4872c-380b-4dcc-bd46-ad6a624a2d34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp" Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.740412 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp\" (UID: \"7dd4872c-380b-4dcc-bd46-ad6a624a2d34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp" Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.745278 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp\" (UID: \"7dd4872c-380b-4dcc-bd46-ad6a624a2d34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp" Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.756417 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp\" (UID: \"7dd4872c-380b-4dcc-bd46-ad6a624a2d34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp" Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.756585 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdftr\" (UniqueName: \"kubernetes.io/projected/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-kube-api-access-qdftr\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp\" (UID: \"7dd4872c-380b-4dcc-bd46-ad6a624a2d34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp" Dec 11 08:51:15 crc kubenswrapper[4881]: I1211 08:51:15.893007 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp" Dec 11 08:51:16 crc kubenswrapper[4881]: I1211 08:51:16.489129 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp"] Dec 11 08:51:17 crc kubenswrapper[4881]: I1211 08:51:17.504612 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp" event={"ID":"7dd4872c-380b-4dcc-bd46-ad6a624a2d34","Type":"ContainerStarted","Data":"9e03c25a537811b3e9ff1caa90e2beaa6bd1a0881d115a04485233f535b2615a"} Dec 11 08:51:17 crc kubenswrapper[4881]: I1211 08:51:17.506075 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp" event={"ID":"7dd4872c-380b-4dcc-bd46-ad6a624a2d34","Type":"ContainerStarted","Data":"587ba71d2d055df1a4a7df79d6c84ca42841e30c2dae0810d01efa283a025082"} Dec 11 08:51:18 crc kubenswrapper[4881]: I1211 08:51:18.552699 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp" podStartSLOduration=3.0716139670000002 podStartE2EDuration="3.552675633s" podCreationTimestamp="2025-12-11 08:51:15 +0000 UTC" firstStartedPulling="2025-12-11 08:51:16.491451827 +0000 UTC m=+2124.868820524" lastFinishedPulling="2025-12-11 08:51:16.972513493 +0000 UTC m=+2125.349882190" observedRunningTime="2025-12-11 08:51:18.541693273 +0000 UTC m=+2126.919061990" watchObservedRunningTime="2025-12-11 08:51:18.552675633 +0000 UTC m=+2126.930044330" Dec 11 08:51:25 crc kubenswrapper[4881]: I1211 08:51:25.056255 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-4h7jd"] Dec 11 08:51:25 crc kubenswrapper[4881]: I1211 08:51:25.069290 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-4h7jd"] Dec 11 08:51:27 crc kubenswrapper[4881]: I1211 08:51:27.019816 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6d5fc48-a707-4ab1-a8f9-392295486185" path="/var/lib/kubelet/pods/b6d5fc48-a707-4ab1-a8f9-392295486185/volumes" Dec 11 08:51:28 crc kubenswrapper[4881]: I1211 08:51:28.323596 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tww5m"] Dec 11 08:51:28 crc kubenswrapper[4881]: I1211 08:51:28.326557 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tww5m" Dec 11 08:51:28 crc kubenswrapper[4881]: I1211 08:51:28.336678 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tww5m"] Dec 11 08:51:28 crc kubenswrapper[4881]: I1211 08:51:28.461268 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7ee89d65-d146-42b8-a58f-26abfa66d3e8-utilities\") pod \"redhat-operators-tww5m\" (UID: \"7ee89d65-d146-42b8-a58f-26abfa66d3e8\") " pod="openshift-marketplace/redhat-operators-tww5m" Dec 11 08:51:28 crc kubenswrapper[4881]: I1211 08:51:28.461488 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7ee89d65-d146-42b8-a58f-26abfa66d3e8-catalog-content\") pod \"redhat-operators-tww5m\" (UID: \"7ee89d65-d146-42b8-a58f-26abfa66d3e8\") " pod="openshift-marketplace/redhat-operators-tww5m" Dec 11 08:51:28 crc kubenswrapper[4881]: I1211 08:51:28.461518 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wr9lk\" (UniqueName: \"kubernetes.io/projected/7ee89d65-d146-42b8-a58f-26abfa66d3e8-kube-api-access-wr9lk\") pod \"redhat-operators-tww5m\" (UID: \"7ee89d65-d146-42b8-a58f-26abfa66d3e8\") " pod="openshift-marketplace/redhat-operators-tww5m" Dec 11 08:51:28 crc kubenswrapper[4881]: I1211 08:51:28.564199 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wr9lk\" (UniqueName: \"kubernetes.io/projected/7ee89d65-d146-42b8-a58f-26abfa66d3e8-kube-api-access-wr9lk\") pod \"redhat-operators-tww5m\" (UID: \"7ee89d65-d146-42b8-a58f-26abfa66d3e8\") " pod="openshift-marketplace/redhat-operators-tww5m" Dec 11 08:51:28 crc kubenswrapper[4881]: I1211 08:51:28.564451 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7ee89d65-d146-42b8-a58f-26abfa66d3e8-utilities\") pod \"redhat-operators-tww5m\" (UID: \"7ee89d65-d146-42b8-a58f-26abfa66d3e8\") " pod="openshift-marketplace/redhat-operators-tww5m" Dec 11 08:51:28 crc kubenswrapper[4881]: I1211 08:51:28.564558 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7ee89d65-d146-42b8-a58f-26abfa66d3e8-catalog-content\") pod \"redhat-operators-tww5m\" (UID: \"7ee89d65-d146-42b8-a58f-26abfa66d3e8\") " pod="openshift-marketplace/redhat-operators-tww5m" Dec 11 08:51:28 crc kubenswrapper[4881]: I1211 08:51:28.565372 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7ee89d65-d146-42b8-a58f-26abfa66d3e8-utilities\") pod \"redhat-operators-tww5m\" (UID: \"7ee89d65-d146-42b8-a58f-26abfa66d3e8\") " pod="openshift-marketplace/redhat-operators-tww5m" Dec 11 08:51:28 crc kubenswrapper[4881]: I1211 08:51:28.565446 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7ee89d65-d146-42b8-a58f-26abfa66d3e8-catalog-content\") pod \"redhat-operators-tww5m\" (UID: \"7ee89d65-d146-42b8-a58f-26abfa66d3e8\") " pod="openshift-marketplace/redhat-operators-tww5m" Dec 11 08:51:28 crc kubenswrapper[4881]: I1211 08:51:28.583874 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-wr9lk\" (UniqueName: \"kubernetes.io/projected/7ee89d65-d146-42b8-a58f-26abfa66d3e8-kube-api-access-wr9lk\") pod \"redhat-operators-tww5m\" (UID: \"7ee89d65-d146-42b8-a58f-26abfa66d3e8\") " pod="openshift-marketplace/redhat-operators-tww5m" Dec 11 08:51:28 crc kubenswrapper[4881]: I1211 08:51:28.663824 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tww5m" Dec 11 08:51:29 crc kubenswrapper[4881]: I1211 08:51:29.160725 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tww5m"] Dec 11 08:51:29 crc kubenswrapper[4881]: W1211 08:51:29.174913 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7ee89d65_d146_42b8_a58f_26abfa66d3e8.slice/crio-ff8fc3bcfd9e4e3f5c49a0ffff22eaa6536d9d702de3d39b04441730b0557ecf WatchSource:0}: Error finding container ff8fc3bcfd9e4e3f5c49a0ffff22eaa6536d9d702de3d39b04441730b0557ecf: Status 404 returned error can't find the container with id ff8fc3bcfd9e4e3f5c49a0ffff22eaa6536d9d702de3d39b04441730b0557ecf Dec 11 08:51:29 crc kubenswrapper[4881]: I1211 08:51:29.396902 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:51:29 crc kubenswrapper[4881]: I1211 08:51:29.396965 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:51:29 crc kubenswrapper[4881]: I1211 08:51:29.648170 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tww5m" event={"ID":"7ee89d65-d146-42b8-a58f-26abfa66d3e8","Type":"ContainerStarted","Data":"ff8fc3bcfd9e4e3f5c49a0ffff22eaa6536d9d702de3d39b04441730b0557ecf"} Dec 11 08:51:30 crc kubenswrapper[4881]: I1211 08:51:30.658893 4881 generic.go:334] "Generic (PLEG): container finished" podID="7ee89d65-d146-42b8-a58f-26abfa66d3e8" containerID="f95fadc679135f6c8010b6dd8aef2aafb9795c9b9d17cf95e2d5177940b32137" exitCode=0 Dec 11 08:51:30 crc kubenswrapper[4881]: I1211 08:51:30.658946 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tww5m" event={"ID":"7ee89d65-d146-42b8-a58f-26abfa66d3e8","Type":"ContainerDied","Data":"f95fadc679135f6c8010b6dd8aef2aafb9795c9b9d17cf95e2d5177940b32137"} Dec 11 08:51:32 crc kubenswrapper[4881]: I1211 08:51:32.698441 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tww5m" event={"ID":"7ee89d65-d146-42b8-a58f-26abfa66d3e8","Type":"ContainerStarted","Data":"b328f49288ad726ff845b7cd32e0b906e725b6b45dde7b857e8f96a4da8982fb"} Dec 11 08:51:40 crc kubenswrapper[4881]: I1211 08:51:40.778057 4881 generic.go:334] "Generic (PLEG): container finished" podID="7ee89d65-d146-42b8-a58f-26abfa66d3e8" containerID="b328f49288ad726ff845b7cd32e0b906e725b6b45dde7b857e8f96a4da8982fb" exitCode=0 Dec 11 08:51:40 crc kubenswrapper[4881]: I1211 08:51:40.778142 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tww5m" 
event={"ID":"7ee89d65-d146-42b8-a58f-26abfa66d3e8","Type":"ContainerDied","Data":"b328f49288ad726ff845b7cd32e0b906e725b6b45dde7b857e8f96a4da8982fb"} Dec 11 08:51:42 crc kubenswrapper[4881]: I1211 08:51:42.804632 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tww5m" event={"ID":"7ee89d65-d146-42b8-a58f-26abfa66d3e8","Type":"ContainerStarted","Data":"71483bc4c11765109061676c9667f6615b17a5d07aba4e1af8eb37215316cb06"} Dec 11 08:51:42 crc kubenswrapper[4881]: I1211 08:51:42.837212 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tww5m" podStartSLOduration=3.921199021 podStartE2EDuration="14.837171807s" podCreationTimestamp="2025-12-11 08:51:28 +0000 UTC" firstStartedPulling="2025-12-11 08:51:30.661450404 +0000 UTC m=+2139.038819101" lastFinishedPulling="2025-12-11 08:51:41.57742318 +0000 UTC m=+2149.954791887" observedRunningTime="2025-12-11 08:51:42.825850119 +0000 UTC m=+2151.203218816" watchObservedRunningTime="2025-12-11 08:51:42.837171807 +0000 UTC m=+2151.214540504" Dec 11 08:51:48 crc kubenswrapper[4881]: I1211 08:51:48.664077 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tww5m" Dec 11 08:51:48 crc kubenswrapper[4881]: I1211 08:51:48.664646 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tww5m" Dec 11 08:51:49 crc kubenswrapper[4881]: I1211 08:51:49.716481 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tww5m" podUID="7ee89d65-d146-42b8-a58f-26abfa66d3e8" containerName="registry-server" probeResult="failure" output=< Dec 11 08:51:49 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 08:51:49 crc kubenswrapper[4881]: > Dec 11 08:51:53 crc kubenswrapper[4881]: I1211 08:51:53.056503 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-dgdhn"] Dec 11 08:51:53 crc kubenswrapper[4881]: I1211 08:51:53.069969 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-dgdhn"] Dec 11 08:51:55 crc kubenswrapper[4881]: I1211 08:51:55.030146 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc" path="/var/lib/kubelet/pods/4ce4c4b4-5f9a-4e64-af32-6c3d805a01fc/volumes" Dec 11 08:51:58 crc kubenswrapper[4881]: I1211 08:51:58.717134 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tww5m" Dec 11 08:51:58 crc kubenswrapper[4881]: I1211 08:51:58.771480 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tww5m" Dec 11 08:51:59 crc kubenswrapper[4881]: I1211 08:51:59.396946 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:51:59 crc kubenswrapper[4881]: I1211 08:51:59.397004 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial 
tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:51:59 crc kubenswrapper[4881]: I1211 08:51:59.397051 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 08:51:59 crc kubenswrapper[4881]: I1211 08:51:59.397980 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"73cf77c8e58dfead623c00357f3020e90cfa5d92429139db8dcd67259d7f3aee"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 08:51:59 crc kubenswrapper[4881]: I1211 08:51:59.398102 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://73cf77c8e58dfead623c00357f3020e90cfa5d92429139db8dcd67259d7f3aee" gracePeriod=600 Dec 11 08:51:59 crc kubenswrapper[4881]: I1211 08:51:59.530498 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tww5m"] Dec 11 08:51:59 crc kubenswrapper[4881]: I1211 08:51:59.976025 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="73cf77c8e58dfead623c00357f3020e90cfa5d92429139db8dcd67259d7f3aee" exitCode=0 Dec 11 08:51:59 crc kubenswrapper[4881]: I1211 08:51:59.976098 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"73cf77c8e58dfead623c00357f3020e90cfa5d92429139db8dcd67259d7f3aee"} Dec 11 08:51:59 crc kubenswrapper[4881]: I1211 08:51:59.976172 4881 scope.go:117] "RemoveContainer" containerID="6d4e8d739554b5cd95df4279914b6fd3590e9c30b07a9063a641aef8fdb20ae9" Dec 11 08:51:59 crc kubenswrapper[4881]: I1211 08:51:59.976271 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tww5m" podUID="7ee89d65-d146-42b8-a58f-26abfa66d3e8" containerName="registry-server" containerID="cri-o://71483bc4c11765109061676c9667f6615b17a5d07aba4e1af8eb37215316cb06" gracePeriod=2 Dec 11 08:52:00 crc kubenswrapper[4881]: I1211 08:52:00.666638 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tww5m" Dec 11 08:52:00 crc kubenswrapper[4881]: I1211 08:52:00.831112 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wr9lk\" (UniqueName: \"kubernetes.io/projected/7ee89d65-d146-42b8-a58f-26abfa66d3e8-kube-api-access-wr9lk\") pod \"7ee89d65-d146-42b8-a58f-26abfa66d3e8\" (UID: \"7ee89d65-d146-42b8-a58f-26abfa66d3e8\") " Dec 11 08:52:00 crc kubenswrapper[4881]: I1211 08:52:00.831532 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7ee89d65-d146-42b8-a58f-26abfa66d3e8-catalog-content\") pod \"7ee89d65-d146-42b8-a58f-26abfa66d3e8\" (UID: \"7ee89d65-d146-42b8-a58f-26abfa66d3e8\") " Dec 11 08:52:00 crc kubenswrapper[4881]: I1211 08:52:00.831901 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7ee89d65-d146-42b8-a58f-26abfa66d3e8-utilities\") pod \"7ee89d65-d146-42b8-a58f-26abfa66d3e8\" (UID: \"7ee89d65-d146-42b8-a58f-26abfa66d3e8\") " Dec 11 08:52:00 crc kubenswrapper[4881]: I1211 08:52:00.832767 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7ee89d65-d146-42b8-a58f-26abfa66d3e8-utilities" (OuterVolumeSpecName: "utilities") pod "7ee89d65-d146-42b8-a58f-26abfa66d3e8" (UID: "7ee89d65-d146-42b8-a58f-26abfa66d3e8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:52:00 crc kubenswrapper[4881]: I1211 08:52:00.840625 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ee89d65-d146-42b8-a58f-26abfa66d3e8-kube-api-access-wr9lk" (OuterVolumeSpecName: "kube-api-access-wr9lk") pod "7ee89d65-d146-42b8-a58f-26abfa66d3e8" (UID: "7ee89d65-d146-42b8-a58f-26abfa66d3e8"). InnerVolumeSpecName "kube-api-access-wr9lk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:52:00 crc kubenswrapper[4881]: I1211 08:52:00.934646 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7ee89d65-d146-42b8-a58f-26abfa66d3e8-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:52:00 crc kubenswrapper[4881]: I1211 08:52:00.934678 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wr9lk\" (UniqueName: \"kubernetes.io/projected/7ee89d65-d146-42b8-a58f-26abfa66d3e8-kube-api-access-wr9lk\") on node \"crc\" DevicePath \"\"" Dec 11 08:52:00 crc kubenswrapper[4881]: I1211 08:52:00.952015 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7ee89d65-d146-42b8-a58f-26abfa66d3e8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7ee89d65-d146-42b8-a58f-26abfa66d3e8" (UID: "7ee89d65-d146-42b8-a58f-26abfa66d3e8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:52:00 crc kubenswrapper[4881]: I1211 08:52:00.989715 4881 generic.go:334] "Generic (PLEG): container finished" podID="7ee89d65-d146-42b8-a58f-26abfa66d3e8" containerID="71483bc4c11765109061676c9667f6615b17a5d07aba4e1af8eb37215316cb06" exitCode=0 Dec 11 08:52:00 crc kubenswrapper[4881]: I1211 08:52:00.989803 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tww5m" event={"ID":"7ee89d65-d146-42b8-a58f-26abfa66d3e8","Type":"ContainerDied","Data":"71483bc4c11765109061676c9667f6615b17a5d07aba4e1af8eb37215316cb06"} Dec 11 08:52:00 crc kubenswrapper[4881]: I1211 08:52:00.989826 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tww5m" Dec 11 08:52:00 crc kubenswrapper[4881]: I1211 08:52:00.989834 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tww5m" event={"ID":"7ee89d65-d146-42b8-a58f-26abfa66d3e8","Type":"ContainerDied","Data":"ff8fc3bcfd9e4e3f5c49a0ffff22eaa6536d9d702de3d39b04441730b0557ecf"} Dec 11 08:52:00 crc kubenswrapper[4881]: I1211 08:52:00.989859 4881 scope.go:117] "RemoveContainer" containerID="71483bc4c11765109061676c9667f6615b17a5d07aba4e1af8eb37215316cb06" Dec 11 08:52:00 crc kubenswrapper[4881]: I1211 08:52:00.996126 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2"} Dec 11 08:52:01 crc kubenswrapper[4881]: I1211 08:52:01.013967 4881 scope.go:117] "RemoveContainer" containerID="b328f49288ad726ff845b7cd32e0b906e725b6b45dde7b857e8f96a4da8982fb" Dec 11 08:52:01 crc kubenswrapper[4881]: I1211 08:52:01.036917 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7ee89d65-d146-42b8-a58f-26abfa66d3e8-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:52:01 crc kubenswrapper[4881]: I1211 08:52:01.039273 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tww5m"] Dec 11 08:52:01 crc kubenswrapper[4881]: I1211 08:52:01.047687 4881 scope.go:117] "RemoveContainer" containerID="f95fadc679135f6c8010b6dd8aef2aafb9795c9b9d17cf95e2d5177940b32137" Dec 11 08:52:01 crc kubenswrapper[4881]: I1211 08:52:01.051707 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tww5m"] Dec 11 08:52:01 crc kubenswrapper[4881]: I1211 08:52:01.112596 4881 scope.go:117] "RemoveContainer" containerID="71483bc4c11765109061676c9667f6615b17a5d07aba4e1af8eb37215316cb06" Dec 11 08:52:01 crc kubenswrapper[4881]: E1211 08:52:01.113324 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71483bc4c11765109061676c9667f6615b17a5d07aba4e1af8eb37215316cb06\": container with ID starting with 71483bc4c11765109061676c9667f6615b17a5d07aba4e1af8eb37215316cb06 not found: ID does not exist" containerID="71483bc4c11765109061676c9667f6615b17a5d07aba4e1af8eb37215316cb06" Dec 11 08:52:01 crc kubenswrapper[4881]: I1211 08:52:01.113371 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71483bc4c11765109061676c9667f6615b17a5d07aba4e1af8eb37215316cb06"} err="failed to get container status 
\"71483bc4c11765109061676c9667f6615b17a5d07aba4e1af8eb37215316cb06\": rpc error: code = NotFound desc = could not find container \"71483bc4c11765109061676c9667f6615b17a5d07aba4e1af8eb37215316cb06\": container with ID starting with 71483bc4c11765109061676c9667f6615b17a5d07aba4e1af8eb37215316cb06 not found: ID does not exist" Dec 11 08:52:01 crc kubenswrapper[4881]: I1211 08:52:01.113393 4881 scope.go:117] "RemoveContainer" containerID="b328f49288ad726ff845b7cd32e0b906e725b6b45dde7b857e8f96a4da8982fb" Dec 11 08:52:01 crc kubenswrapper[4881]: E1211 08:52:01.113642 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b328f49288ad726ff845b7cd32e0b906e725b6b45dde7b857e8f96a4da8982fb\": container with ID starting with b328f49288ad726ff845b7cd32e0b906e725b6b45dde7b857e8f96a4da8982fb not found: ID does not exist" containerID="b328f49288ad726ff845b7cd32e0b906e725b6b45dde7b857e8f96a4da8982fb" Dec 11 08:52:01 crc kubenswrapper[4881]: I1211 08:52:01.113663 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b328f49288ad726ff845b7cd32e0b906e725b6b45dde7b857e8f96a4da8982fb"} err="failed to get container status \"b328f49288ad726ff845b7cd32e0b906e725b6b45dde7b857e8f96a4da8982fb\": rpc error: code = NotFound desc = could not find container \"b328f49288ad726ff845b7cd32e0b906e725b6b45dde7b857e8f96a4da8982fb\": container with ID starting with b328f49288ad726ff845b7cd32e0b906e725b6b45dde7b857e8f96a4da8982fb not found: ID does not exist" Dec 11 08:52:01 crc kubenswrapper[4881]: I1211 08:52:01.113677 4881 scope.go:117] "RemoveContainer" containerID="f95fadc679135f6c8010b6dd8aef2aafb9795c9b9d17cf95e2d5177940b32137" Dec 11 08:52:01 crc kubenswrapper[4881]: E1211 08:52:01.113916 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f95fadc679135f6c8010b6dd8aef2aafb9795c9b9d17cf95e2d5177940b32137\": container with ID starting with f95fadc679135f6c8010b6dd8aef2aafb9795c9b9d17cf95e2d5177940b32137 not found: ID does not exist" containerID="f95fadc679135f6c8010b6dd8aef2aafb9795c9b9d17cf95e2d5177940b32137" Dec 11 08:52:01 crc kubenswrapper[4881]: I1211 08:52:01.113939 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f95fadc679135f6c8010b6dd8aef2aafb9795c9b9d17cf95e2d5177940b32137"} err="failed to get container status \"f95fadc679135f6c8010b6dd8aef2aafb9795c9b9d17cf95e2d5177940b32137\": rpc error: code = NotFound desc = could not find container \"f95fadc679135f6c8010b6dd8aef2aafb9795c9b9d17cf95e2d5177940b32137\": container with ID starting with f95fadc679135f6c8010b6dd8aef2aafb9795c9b9d17cf95e2d5177940b32137 not found: ID does not exist" Dec 11 08:52:03 crc kubenswrapper[4881]: I1211 08:52:03.019716 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ee89d65-d146-42b8-a58f-26abfa66d3e8" path="/var/lib/kubelet/pods/7ee89d65-d146-42b8-a58f-26abfa66d3e8/volumes" Dec 11 08:52:09 crc kubenswrapper[4881]: I1211 08:52:09.597299 4881 scope.go:117] "RemoveContainer" containerID="59913bd1ac6e438f295e06b92ae8b703b58c3f2270de6ae1db0d0b8aada64c33" Dec 11 08:52:09 crc kubenswrapper[4881]: I1211 08:52:09.629293 4881 scope.go:117] "RemoveContainer" containerID="f97911d88b636654f9534271dbb427322400e2484df7a386c23013f0fa9bfd8a" Dec 11 08:52:12 crc kubenswrapper[4881]: I1211 08:52:12.606047 4881 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-marketplace-qprsd"] Dec 11 08:52:12 crc kubenswrapper[4881]: E1211 08:52:12.606901 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ee89d65-d146-42b8-a58f-26abfa66d3e8" containerName="extract-content" Dec 11 08:52:12 crc kubenswrapper[4881]: I1211 08:52:12.606919 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ee89d65-d146-42b8-a58f-26abfa66d3e8" containerName="extract-content" Dec 11 08:52:12 crc kubenswrapper[4881]: E1211 08:52:12.606961 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ee89d65-d146-42b8-a58f-26abfa66d3e8" containerName="extract-utilities" Dec 11 08:52:12 crc kubenswrapper[4881]: I1211 08:52:12.606968 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ee89d65-d146-42b8-a58f-26abfa66d3e8" containerName="extract-utilities" Dec 11 08:52:12 crc kubenswrapper[4881]: E1211 08:52:12.606981 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ee89d65-d146-42b8-a58f-26abfa66d3e8" containerName="registry-server" Dec 11 08:52:12 crc kubenswrapper[4881]: I1211 08:52:12.606989 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ee89d65-d146-42b8-a58f-26abfa66d3e8" containerName="registry-server" Dec 11 08:52:12 crc kubenswrapper[4881]: I1211 08:52:12.607209 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ee89d65-d146-42b8-a58f-26abfa66d3e8" containerName="registry-server" Dec 11 08:52:12 crc kubenswrapper[4881]: I1211 08:52:12.610817 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qprsd" Dec 11 08:52:12 crc kubenswrapper[4881]: I1211 08:52:12.621673 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qprsd"] Dec 11 08:52:12 crc kubenswrapper[4881]: I1211 08:52:12.723298 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-catalog-content\") pod \"redhat-marketplace-qprsd\" (UID: \"438e8226-eac2-4fbf-8f27-cfdcec4dca1a\") " pod="openshift-marketplace/redhat-marketplace-qprsd" Dec 11 08:52:12 crc kubenswrapper[4881]: I1211 08:52:12.723846 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-utilities\") pod \"redhat-marketplace-qprsd\" (UID: \"438e8226-eac2-4fbf-8f27-cfdcec4dca1a\") " pod="openshift-marketplace/redhat-marketplace-qprsd" Dec 11 08:52:12 crc kubenswrapper[4881]: I1211 08:52:12.723932 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hp5s\" (UniqueName: \"kubernetes.io/projected/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-kube-api-access-5hp5s\") pod \"redhat-marketplace-qprsd\" (UID: \"438e8226-eac2-4fbf-8f27-cfdcec4dca1a\") " pod="openshift-marketplace/redhat-marketplace-qprsd" Dec 11 08:52:12 crc kubenswrapper[4881]: I1211 08:52:12.825993 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-utilities\") pod \"redhat-marketplace-qprsd\" (UID: \"438e8226-eac2-4fbf-8f27-cfdcec4dca1a\") " pod="openshift-marketplace/redhat-marketplace-qprsd" Dec 11 08:52:12 crc kubenswrapper[4881]: I1211 08:52:12.826056 4881 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-5hp5s\" (UniqueName: \"kubernetes.io/projected/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-kube-api-access-5hp5s\") pod \"redhat-marketplace-qprsd\" (UID: \"438e8226-eac2-4fbf-8f27-cfdcec4dca1a\") " pod="openshift-marketplace/redhat-marketplace-qprsd" Dec 11 08:52:12 crc kubenswrapper[4881]: I1211 08:52:12.826244 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-catalog-content\") pod \"redhat-marketplace-qprsd\" (UID: \"438e8226-eac2-4fbf-8f27-cfdcec4dca1a\") " pod="openshift-marketplace/redhat-marketplace-qprsd" Dec 11 08:52:12 crc kubenswrapper[4881]: I1211 08:52:12.826576 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-utilities\") pod \"redhat-marketplace-qprsd\" (UID: \"438e8226-eac2-4fbf-8f27-cfdcec4dca1a\") " pod="openshift-marketplace/redhat-marketplace-qprsd" Dec 11 08:52:12 crc kubenswrapper[4881]: I1211 08:52:12.826698 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-catalog-content\") pod \"redhat-marketplace-qprsd\" (UID: \"438e8226-eac2-4fbf-8f27-cfdcec4dca1a\") " pod="openshift-marketplace/redhat-marketplace-qprsd" Dec 11 08:52:12 crc kubenswrapper[4881]: I1211 08:52:12.848257 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hp5s\" (UniqueName: \"kubernetes.io/projected/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-kube-api-access-5hp5s\") pod \"redhat-marketplace-qprsd\" (UID: \"438e8226-eac2-4fbf-8f27-cfdcec4dca1a\") " pod="openshift-marketplace/redhat-marketplace-qprsd" Dec 11 08:52:12 crc kubenswrapper[4881]: I1211 08:52:12.929254 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qprsd" Dec 11 08:52:13 crc kubenswrapper[4881]: I1211 08:52:13.447576 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qprsd"] Dec 11 08:52:14 crc kubenswrapper[4881]: I1211 08:52:14.142565 4881 generic.go:334] "Generic (PLEG): container finished" podID="438e8226-eac2-4fbf-8f27-cfdcec4dca1a" containerID="bbe40eb39882da0051163eae6d171ac8ec89647fc015fcb3e0b9ff13aefde137" exitCode=0 Dec 11 08:52:14 crc kubenswrapper[4881]: I1211 08:52:14.142609 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qprsd" event={"ID":"438e8226-eac2-4fbf-8f27-cfdcec4dca1a","Type":"ContainerDied","Data":"bbe40eb39882da0051163eae6d171ac8ec89647fc015fcb3e0b9ff13aefde137"} Dec 11 08:52:14 crc kubenswrapper[4881]: I1211 08:52:14.142634 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qprsd" event={"ID":"438e8226-eac2-4fbf-8f27-cfdcec4dca1a","Type":"ContainerStarted","Data":"8f9b0227e40984da9b9d1c37a6843515e719c668778751e75c1816f97c207574"} Dec 11 08:52:16 crc kubenswrapper[4881]: I1211 08:52:16.165476 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qprsd" event={"ID":"438e8226-eac2-4fbf-8f27-cfdcec4dca1a","Type":"ContainerStarted","Data":"4d1c08016cf6126c05c9866c5c361b260677f75ed80440c18bd4da177e247394"} Dec 11 08:52:17 crc kubenswrapper[4881]: I1211 08:52:17.177395 4881 generic.go:334] "Generic (PLEG): container finished" podID="438e8226-eac2-4fbf-8f27-cfdcec4dca1a" containerID="4d1c08016cf6126c05c9866c5c361b260677f75ed80440c18bd4da177e247394" exitCode=0 Dec 11 08:52:17 crc kubenswrapper[4881]: I1211 08:52:17.177452 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qprsd" event={"ID":"438e8226-eac2-4fbf-8f27-cfdcec4dca1a","Type":"ContainerDied","Data":"4d1c08016cf6126c05c9866c5c361b260677f75ed80440c18bd4da177e247394"} Dec 11 08:52:18 crc kubenswrapper[4881]: I1211 08:52:18.195025 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qprsd" event={"ID":"438e8226-eac2-4fbf-8f27-cfdcec4dca1a","Type":"ContainerStarted","Data":"157d7f92c70dcc83b8d808ef9d8e31022a3565fc27b1af5a048f2d739cb366a1"} Dec 11 08:52:18 crc kubenswrapper[4881]: I1211 08:52:18.227787 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qprsd" podStartSLOduration=2.666757682 podStartE2EDuration="6.227763075s" podCreationTimestamp="2025-12-11 08:52:12 +0000 UTC" firstStartedPulling="2025-12-11 08:52:14.145020187 +0000 UTC m=+2182.522388884" lastFinishedPulling="2025-12-11 08:52:17.70602558 +0000 UTC m=+2186.083394277" observedRunningTime="2025-12-11 08:52:18.215978875 +0000 UTC m=+2186.593347572" watchObservedRunningTime="2025-12-11 08:52:18.227763075 +0000 UTC m=+2186.605131792" Dec 11 08:52:22 crc kubenswrapper[4881]: I1211 08:52:22.930301 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qprsd" Dec 11 08:52:22 crc kubenswrapper[4881]: I1211 08:52:22.930918 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qprsd" Dec 11 08:52:23 crc kubenswrapper[4881]: I1211 08:52:23.001343 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-marketplace-qprsd" Dec 11 08:52:23 crc kubenswrapper[4881]: I1211 08:52:23.356631 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qprsd" Dec 11 08:52:24 crc kubenswrapper[4881]: I1211 08:52:24.065136 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-s5tsr"] Dec 11 08:52:24 crc kubenswrapper[4881]: I1211 08:52:24.076790 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-s5tsr"] Dec 11 08:52:25 crc kubenswrapper[4881]: I1211 08:52:25.018620 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b6f1a51-4774-4356-a5ef-5e901d75d889" path="/var/lib/kubelet/pods/6b6f1a51-4774-4356-a5ef-5e901d75d889/volumes" Dec 11 08:52:26 crc kubenswrapper[4881]: I1211 08:52:26.592738 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qprsd"] Dec 11 08:52:26 crc kubenswrapper[4881]: I1211 08:52:26.593434 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qprsd" podUID="438e8226-eac2-4fbf-8f27-cfdcec4dca1a" containerName="registry-server" containerID="cri-o://157d7f92c70dcc83b8d808ef9d8e31022a3565fc27b1af5a048f2d739cb366a1" gracePeriod=2 Dec 11 08:52:27 crc kubenswrapper[4881]: I1211 08:52:27.307838 4881 generic.go:334] "Generic (PLEG): container finished" podID="438e8226-eac2-4fbf-8f27-cfdcec4dca1a" containerID="157d7f92c70dcc83b8d808ef9d8e31022a3565fc27b1af5a048f2d739cb366a1" exitCode=0 Dec 11 08:52:27 crc kubenswrapper[4881]: I1211 08:52:27.307886 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qprsd" event={"ID":"438e8226-eac2-4fbf-8f27-cfdcec4dca1a","Type":"ContainerDied","Data":"157d7f92c70dcc83b8d808ef9d8e31022a3565fc27b1af5a048f2d739cb366a1"} Dec 11 08:52:27 crc kubenswrapper[4881]: I1211 08:52:27.800236 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qprsd" Dec 11 08:52:27 crc kubenswrapper[4881]: I1211 08:52:27.929730 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hp5s\" (UniqueName: \"kubernetes.io/projected/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-kube-api-access-5hp5s\") pod \"438e8226-eac2-4fbf-8f27-cfdcec4dca1a\" (UID: \"438e8226-eac2-4fbf-8f27-cfdcec4dca1a\") " Dec 11 08:52:27 crc kubenswrapper[4881]: I1211 08:52:27.930025 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-catalog-content\") pod \"438e8226-eac2-4fbf-8f27-cfdcec4dca1a\" (UID: \"438e8226-eac2-4fbf-8f27-cfdcec4dca1a\") " Dec 11 08:52:27 crc kubenswrapper[4881]: I1211 08:52:27.930071 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-utilities\") pod \"438e8226-eac2-4fbf-8f27-cfdcec4dca1a\" (UID: \"438e8226-eac2-4fbf-8f27-cfdcec4dca1a\") " Dec 11 08:52:27 crc kubenswrapper[4881]: I1211 08:52:27.930976 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-utilities" (OuterVolumeSpecName: "utilities") pod "438e8226-eac2-4fbf-8f27-cfdcec4dca1a" (UID: "438e8226-eac2-4fbf-8f27-cfdcec4dca1a"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:52:27 crc kubenswrapper[4881]: I1211 08:52:27.931877 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:52:27 crc kubenswrapper[4881]: I1211 08:52:27.936642 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-kube-api-access-5hp5s" (OuterVolumeSpecName: "kube-api-access-5hp5s") pod "438e8226-eac2-4fbf-8f27-cfdcec4dca1a" (UID: "438e8226-eac2-4fbf-8f27-cfdcec4dca1a"). InnerVolumeSpecName "kube-api-access-5hp5s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:52:27 crc kubenswrapper[4881]: I1211 08:52:27.955483 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "438e8226-eac2-4fbf-8f27-cfdcec4dca1a" (UID: "438e8226-eac2-4fbf-8f27-cfdcec4dca1a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:52:28 crc kubenswrapper[4881]: I1211 08:52:28.034172 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:52:28 crc kubenswrapper[4881]: I1211 08:52:28.034213 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hp5s\" (UniqueName: \"kubernetes.io/projected/438e8226-eac2-4fbf-8f27-cfdcec4dca1a-kube-api-access-5hp5s\") on node \"crc\" DevicePath \"\"" Dec 11 08:52:28 crc kubenswrapper[4881]: I1211 08:52:28.322877 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qprsd" event={"ID":"438e8226-eac2-4fbf-8f27-cfdcec4dca1a","Type":"ContainerDied","Data":"8f9b0227e40984da9b9d1c37a6843515e719c668778751e75c1816f97c207574"} Dec 11 08:52:28 crc kubenswrapper[4881]: I1211 08:52:28.323197 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qprsd" Dec 11 08:52:28 crc kubenswrapper[4881]: I1211 08:52:28.323221 4881 scope.go:117] "RemoveContainer" containerID="157d7f92c70dcc83b8d808ef9d8e31022a3565fc27b1af5a048f2d739cb366a1" Dec 11 08:52:28 crc kubenswrapper[4881]: I1211 08:52:28.350767 4881 scope.go:117] "RemoveContainer" containerID="4d1c08016cf6126c05c9866c5c361b260677f75ed80440c18bd4da177e247394" Dec 11 08:52:28 crc kubenswrapper[4881]: I1211 08:52:28.374054 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qprsd"] Dec 11 08:52:28 crc kubenswrapper[4881]: I1211 08:52:28.387747 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qprsd"] Dec 11 08:52:28 crc kubenswrapper[4881]: I1211 08:52:28.409417 4881 scope.go:117] "RemoveContainer" containerID="bbe40eb39882da0051163eae6d171ac8ec89647fc015fcb3e0b9ff13aefde137" Dec 11 08:52:29 crc kubenswrapper[4881]: I1211 08:52:29.019010 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="438e8226-eac2-4fbf-8f27-cfdcec4dca1a" path="/var/lib/kubelet/pods/438e8226-eac2-4fbf-8f27-cfdcec4dca1a/volumes" Dec 11 08:52:33 crc kubenswrapper[4881]: I1211 08:52:33.385857 4881 generic.go:334] "Generic (PLEG): container finished" podID="7dd4872c-380b-4dcc-bd46-ad6a624a2d34" containerID="9e03c25a537811b3e9ff1caa90e2beaa6bd1a0881d115a04485233f535b2615a" exitCode=0 Dec 11 08:52:33 crc kubenswrapper[4881]: I1211 08:52:33.385951 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp" event={"ID":"7dd4872c-380b-4dcc-bd46-ad6a624a2d34","Type":"ContainerDied","Data":"9e03c25a537811b3e9ff1caa90e2beaa6bd1a0881d115a04485233f535b2615a"} Dec 11 08:52:34 crc kubenswrapper[4881]: I1211 08:52:34.986277 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.101588 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-inventory\") pod \"7dd4872c-380b-4dcc-bd46-ad6a624a2d34\" (UID: \"7dd4872c-380b-4dcc-bd46-ad6a624a2d34\") " Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.101709 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-ssh-key\") pod \"7dd4872c-380b-4dcc-bd46-ad6a624a2d34\" (UID: \"7dd4872c-380b-4dcc-bd46-ad6a624a2d34\") " Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.101964 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qdftr\" (UniqueName: \"kubernetes.io/projected/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-kube-api-access-qdftr\") pod \"7dd4872c-380b-4dcc-bd46-ad6a624a2d34\" (UID: \"7dd4872c-380b-4dcc-bd46-ad6a624a2d34\") " Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.107075 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-kube-api-access-qdftr" (OuterVolumeSpecName: "kube-api-access-qdftr") pod "7dd4872c-380b-4dcc-bd46-ad6a624a2d34" (UID: "7dd4872c-380b-4dcc-bd46-ad6a624a2d34"). InnerVolumeSpecName "kube-api-access-qdftr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.140766 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-inventory" (OuterVolumeSpecName: "inventory") pod "7dd4872c-380b-4dcc-bd46-ad6a624a2d34" (UID: "7dd4872c-380b-4dcc-bd46-ad6a624a2d34"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.140811 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7dd4872c-380b-4dcc-bd46-ad6a624a2d34" (UID: "7dd4872c-380b-4dcc-bd46-ad6a624a2d34"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.206174 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.206223 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qdftr\" (UniqueName: \"kubernetes.io/projected/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-kube-api-access-qdftr\") on node \"crc\" DevicePath \"\"" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.206239 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7dd4872c-380b-4dcc-bd46-ad6a624a2d34-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.415415 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp" event={"ID":"7dd4872c-380b-4dcc-bd46-ad6a624a2d34","Type":"ContainerDied","Data":"587ba71d2d055df1a4a7df79d6c84ca42841e30c2dae0810d01efa283a025082"} Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.415783 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="587ba71d2d055df1a4a7df79d6c84ca42841e30c2dae0810d01efa283a025082" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.415488 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.516766 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq"] Dec 11 08:52:35 crc kubenswrapper[4881]: E1211 08:52:35.517384 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="438e8226-eac2-4fbf-8f27-cfdcec4dca1a" containerName="extract-utilities" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.517406 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="438e8226-eac2-4fbf-8f27-cfdcec4dca1a" containerName="extract-utilities" Dec 11 08:52:35 crc kubenswrapper[4881]: E1211 08:52:35.517428 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="438e8226-eac2-4fbf-8f27-cfdcec4dca1a" containerName="registry-server" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.517436 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="438e8226-eac2-4fbf-8f27-cfdcec4dca1a" containerName="registry-server" Dec 11 08:52:35 crc kubenswrapper[4881]: E1211 08:52:35.517460 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="438e8226-eac2-4fbf-8f27-cfdcec4dca1a" containerName="extract-content" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.517466 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="438e8226-eac2-4fbf-8f27-cfdcec4dca1a" containerName="extract-content" Dec 11 08:52:35 crc kubenswrapper[4881]: E1211 08:52:35.517491 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dd4872c-380b-4dcc-bd46-ad6a624a2d34" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.517498 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dd4872c-380b-4dcc-bd46-ad6a624a2d34" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.517750 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dd4872c-380b-4dcc-bd46-ad6a624a2d34" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.517770 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="438e8226-eac2-4fbf-8f27-cfdcec4dca1a" containerName="registry-server" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.518678 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.522888 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.522942 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.522959 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.523094 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.547823 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq"] Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.616114 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f4e51bed-808f-4037-b472-88fbe64bd15f-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq\" (UID: \"f4e51bed-808f-4037-b472-88fbe64bd15f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.616204 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f4e51bed-808f-4037-b472-88fbe64bd15f-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq\" (UID: \"f4e51bed-808f-4037-b472-88fbe64bd15f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.616275 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcttr\" (UniqueName: \"kubernetes.io/projected/f4e51bed-808f-4037-b472-88fbe64bd15f-kube-api-access-fcttr\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq\" (UID: \"f4e51bed-808f-4037-b472-88fbe64bd15f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.718285 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcttr\" (UniqueName: \"kubernetes.io/projected/f4e51bed-808f-4037-b472-88fbe64bd15f-kube-api-access-fcttr\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq\" (UID: \"f4e51bed-808f-4037-b472-88fbe64bd15f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.718507 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f4e51bed-808f-4037-b472-88fbe64bd15f-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq\" (UID: \"f4e51bed-808f-4037-b472-88fbe64bd15f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.718607 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f4e51bed-808f-4037-b472-88fbe64bd15f-inventory\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq\" (UID: \"f4e51bed-808f-4037-b472-88fbe64bd15f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.723359 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f4e51bed-808f-4037-b472-88fbe64bd15f-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq\" (UID: \"f4e51bed-808f-4037-b472-88fbe64bd15f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.728882 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f4e51bed-808f-4037-b472-88fbe64bd15f-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq\" (UID: \"f4e51bed-808f-4037-b472-88fbe64bd15f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.736847 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcttr\" (UniqueName: \"kubernetes.io/projected/f4e51bed-808f-4037-b472-88fbe64bd15f-kube-api-access-fcttr\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq\" (UID: \"f4e51bed-808f-4037-b472-88fbe64bd15f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" Dec 11 08:52:35 crc kubenswrapper[4881]: I1211 08:52:35.844164 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" Dec 11 08:52:36 crc kubenswrapper[4881]: I1211 08:52:36.432360 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq"] Dec 11 08:52:37 crc kubenswrapper[4881]: I1211 08:52:37.459492 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" event={"ID":"f4e51bed-808f-4037-b472-88fbe64bd15f","Type":"ContainerStarted","Data":"e120f78e08bf219cdbf1dcf23092ea7983b95dceae1127a55217f54a3d48f52f"} Dec 11 08:52:37 crc kubenswrapper[4881]: I1211 08:52:37.460097 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" event={"ID":"f4e51bed-808f-4037-b472-88fbe64bd15f","Type":"ContainerStarted","Data":"439ea0f93112cf5e62f0b2f67f3a5ca60122e46f0b8f63c565541b2e51bb10d0"} Dec 11 08:52:37 crc kubenswrapper[4881]: I1211 08:52:37.478273 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" podStartSLOduration=1.857780137 podStartE2EDuration="2.478252315s" podCreationTimestamp="2025-12-11 08:52:35 +0000 UTC" firstStartedPulling="2025-12-11 08:52:36.443308905 +0000 UTC m=+2204.820677602" lastFinishedPulling="2025-12-11 08:52:37.063781083 +0000 UTC m=+2205.441149780" observedRunningTime="2025-12-11 08:52:37.477874495 +0000 UTC m=+2205.855243192" watchObservedRunningTime="2025-12-11 08:52:37.478252315 +0000 UTC m=+2205.855621022" Dec 11 08:52:43 crc kubenswrapper[4881]: I1211 08:52:43.520163 4881 generic.go:334] "Generic (PLEG): container finished" podID="f4e51bed-808f-4037-b472-88fbe64bd15f" containerID="e120f78e08bf219cdbf1dcf23092ea7983b95dceae1127a55217f54a3d48f52f" exitCode=0 Dec 11 08:52:43 crc kubenswrapper[4881]: I1211 
08:52:43.520276 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" event={"ID":"f4e51bed-808f-4037-b472-88fbe64bd15f","Type":"ContainerDied","Data":"e120f78e08bf219cdbf1dcf23092ea7983b95dceae1127a55217f54a3d48f52f"} Dec 11 08:52:46 crc kubenswrapper[4881]: I1211 08:52:46.305162 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" Dec 11 08:52:46 crc kubenswrapper[4881]: I1211 08:52:46.390610 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcttr\" (UniqueName: \"kubernetes.io/projected/f4e51bed-808f-4037-b472-88fbe64bd15f-kube-api-access-fcttr\") pod \"f4e51bed-808f-4037-b472-88fbe64bd15f\" (UID: \"f4e51bed-808f-4037-b472-88fbe64bd15f\") " Dec 11 08:52:46 crc kubenswrapper[4881]: I1211 08:52:46.390743 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f4e51bed-808f-4037-b472-88fbe64bd15f-inventory\") pod \"f4e51bed-808f-4037-b472-88fbe64bd15f\" (UID: \"f4e51bed-808f-4037-b472-88fbe64bd15f\") " Dec 11 08:52:46 crc kubenswrapper[4881]: I1211 08:52:46.390824 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f4e51bed-808f-4037-b472-88fbe64bd15f-ssh-key\") pod \"f4e51bed-808f-4037-b472-88fbe64bd15f\" (UID: \"f4e51bed-808f-4037-b472-88fbe64bd15f\") " Dec 11 08:52:46 crc kubenswrapper[4881]: I1211 08:52:46.399156 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4e51bed-808f-4037-b472-88fbe64bd15f-kube-api-access-fcttr" (OuterVolumeSpecName: "kube-api-access-fcttr") pod "f4e51bed-808f-4037-b472-88fbe64bd15f" (UID: "f4e51bed-808f-4037-b472-88fbe64bd15f"). InnerVolumeSpecName "kube-api-access-fcttr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:52:46 crc kubenswrapper[4881]: I1211 08:52:46.441076 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4e51bed-808f-4037-b472-88fbe64bd15f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f4e51bed-808f-4037-b472-88fbe64bd15f" (UID: "f4e51bed-808f-4037-b472-88fbe64bd15f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:52:46 crc kubenswrapper[4881]: I1211 08:52:46.446438 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4e51bed-808f-4037-b472-88fbe64bd15f-inventory" (OuterVolumeSpecName: "inventory") pod "f4e51bed-808f-4037-b472-88fbe64bd15f" (UID: "f4e51bed-808f-4037-b472-88fbe64bd15f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:52:46 crc kubenswrapper[4881]: I1211 08:52:46.494826 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcttr\" (UniqueName: \"kubernetes.io/projected/f4e51bed-808f-4037-b472-88fbe64bd15f-kube-api-access-fcttr\") on node \"crc\" DevicePath \"\"" Dec 11 08:52:46 crc kubenswrapper[4881]: I1211 08:52:46.494884 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f4e51bed-808f-4037-b472-88fbe64bd15f-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 08:52:46 crc kubenswrapper[4881]: I1211 08:52:46.494896 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f4e51bed-808f-4037-b472-88fbe64bd15f-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 08:52:46 crc kubenswrapper[4881]: I1211 08:52:46.552504 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" event={"ID":"f4e51bed-808f-4037-b472-88fbe64bd15f","Type":"ContainerDied","Data":"439ea0f93112cf5e62f0b2f67f3a5ca60122e46f0b8f63c565541b2e51bb10d0"} Dec 11 08:52:46 crc kubenswrapper[4881]: I1211 08:52:46.552751 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="439ea0f93112cf5e62f0b2f67f3a5ca60122e46f0b8f63c565541b2e51bb10d0" Dec 11 08:52:46 crc kubenswrapper[4881]: I1211 08:52:46.552549 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.393781 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t"] Dec 11 08:52:47 crc kubenswrapper[4881]: E1211 08:52:47.394398 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4e51bed-808f-4037-b472-88fbe64bd15f" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.394413 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4e51bed-808f-4037-b472-88fbe64bd15f" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.394718 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4e51bed-808f-4037-b472-88fbe64bd15f" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.395970 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.400483 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.401079 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.401128 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.405242 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t"] Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.437993 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.438801 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/482d67c1-02c6-4526-99fb-2bc546471c4d-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-p978t\" (UID: \"482d67c1-02c6-4526-99fb-2bc546471c4d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.438934 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkpp7\" (UniqueName: \"kubernetes.io/projected/482d67c1-02c6-4526-99fb-2bc546471c4d-kube-api-access-mkpp7\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-p978t\" (UID: \"482d67c1-02c6-4526-99fb-2bc546471c4d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.438964 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/482d67c1-02c6-4526-99fb-2bc546471c4d-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-p978t\" (UID: \"482d67c1-02c6-4526-99fb-2bc546471c4d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.543566 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/482d67c1-02c6-4526-99fb-2bc546471c4d-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-p978t\" (UID: \"482d67c1-02c6-4526-99fb-2bc546471c4d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.543699 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkpp7\" (UniqueName: \"kubernetes.io/projected/482d67c1-02c6-4526-99fb-2bc546471c4d-kube-api-access-mkpp7\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-p978t\" (UID: \"482d67c1-02c6-4526-99fb-2bc546471c4d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.543730 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/482d67c1-02c6-4526-99fb-2bc546471c4d-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-p978t\" (UID: 
\"482d67c1-02c6-4526-99fb-2bc546471c4d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.553608 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/482d67c1-02c6-4526-99fb-2bc546471c4d-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-p978t\" (UID: \"482d67c1-02c6-4526-99fb-2bc546471c4d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.565977 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/482d67c1-02c6-4526-99fb-2bc546471c4d-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-p978t\" (UID: \"482d67c1-02c6-4526-99fb-2bc546471c4d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.591986 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkpp7\" (UniqueName: \"kubernetes.io/projected/482d67c1-02c6-4526-99fb-2bc546471c4d-kube-api-access-mkpp7\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-p978t\" (UID: \"482d67c1-02c6-4526-99fb-2bc546471c4d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" Dec 11 08:52:47 crc kubenswrapper[4881]: I1211 08:52:47.758598 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" Dec 11 08:52:48 crc kubenswrapper[4881]: I1211 08:52:48.320887 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t"] Dec 11 08:52:48 crc kubenswrapper[4881]: I1211 08:52:48.602275 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" event={"ID":"482d67c1-02c6-4526-99fb-2bc546471c4d","Type":"ContainerStarted","Data":"8af917258d83e33b92d50208277384bcac041186f868befc1b1f5d8bcbe30389"} Dec 11 08:52:50 crc kubenswrapper[4881]: I1211 08:52:50.632046 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" event={"ID":"482d67c1-02c6-4526-99fb-2bc546471c4d","Type":"ContainerStarted","Data":"ec40ffc77360785a4b37cd30aaec6271759ea1759a7bbb1103138a272e9ba1ae"} Dec 11 08:52:50 crc kubenswrapper[4881]: I1211 08:52:50.658573 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" podStartSLOduration=2.446304612 podStartE2EDuration="3.658552303s" podCreationTimestamp="2025-12-11 08:52:47 +0000 UTC" firstStartedPulling="2025-12-11 08:52:48.33465529 +0000 UTC m=+2216.712023987" lastFinishedPulling="2025-12-11 08:52:49.546902991 +0000 UTC m=+2217.924271678" observedRunningTime="2025-12-11 08:52:50.646479817 +0000 UTC m=+2219.023848504" watchObservedRunningTime="2025-12-11 08:52:50.658552303 +0000 UTC m=+2219.035920990" Dec 11 08:53:09 crc kubenswrapper[4881]: I1211 08:53:09.766588 4881 scope.go:117] "RemoveContainer" containerID="1f15ea09d95add8e3ec9a347353f72d9f4e1b86913da909c37a877b3ce297048" Dec 11 08:53:27 crc kubenswrapper[4881]: I1211 08:53:27.994750 4881 generic.go:334] "Generic (PLEG): container finished" podID="482d67c1-02c6-4526-99fb-2bc546471c4d" containerID="ec40ffc77360785a4b37cd30aaec6271759ea1759a7bbb1103138a272e9ba1ae" exitCode=0 Dec 11 
08:53:27 crc kubenswrapper[4881]: I1211 08:53:27.994829 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" event={"ID":"482d67c1-02c6-4526-99fb-2bc546471c4d","Type":"ContainerDied","Data":"ec40ffc77360785a4b37cd30aaec6271759ea1759a7bbb1103138a272e9ba1ae"} Dec 11 08:53:29 crc kubenswrapper[4881]: I1211 08:53:29.505609 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" Dec 11 08:53:29 crc kubenswrapper[4881]: I1211 08:53:29.624321 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/482d67c1-02c6-4526-99fb-2bc546471c4d-ssh-key\") pod \"482d67c1-02c6-4526-99fb-2bc546471c4d\" (UID: \"482d67c1-02c6-4526-99fb-2bc546471c4d\") " Dec 11 08:53:29 crc kubenswrapper[4881]: I1211 08:53:29.624428 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/482d67c1-02c6-4526-99fb-2bc546471c4d-inventory\") pod \"482d67c1-02c6-4526-99fb-2bc546471c4d\" (UID: \"482d67c1-02c6-4526-99fb-2bc546471c4d\") " Dec 11 08:53:29 crc kubenswrapper[4881]: I1211 08:53:29.624648 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkpp7\" (UniqueName: \"kubernetes.io/projected/482d67c1-02c6-4526-99fb-2bc546471c4d-kube-api-access-mkpp7\") pod \"482d67c1-02c6-4526-99fb-2bc546471c4d\" (UID: \"482d67c1-02c6-4526-99fb-2bc546471c4d\") " Dec 11 08:53:29 crc kubenswrapper[4881]: I1211 08:53:29.630440 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/482d67c1-02c6-4526-99fb-2bc546471c4d-kube-api-access-mkpp7" (OuterVolumeSpecName: "kube-api-access-mkpp7") pod "482d67c1-02c6-4526-99fb-2bc546471c4d" (UID: "482d67c1-02c6-4526-99fb-2bc546471c4d"). InnerVolumeSpecName "kube-api-access-mkpp7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:53:29 crc kubenswrapper[4881]: I1211 08:53:29.664867 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/482d67c1-02c6-4526-99fb-2bc546471c4d-inventory" (OuterVolumeSpecName: "inventory") pod "482d67c1-02c6-4526-99fb-2bc546471c4d" (UID: "482d67c1-02c6-4526-99fb-2bc546471c4d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:53:29 crc kubenswrapper[4881]: I1211 08:53:29.666518 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/482d67c1-02c6-4526-99fb-2bc546471c4d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "482d67c1-02c6-4526-99fb-2bc546471c4d" (UID: "482d67c1-02c6-4526-99fb-2bc546471c4d"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:53:29 crc kubenswrapper[4881]: I1211 08:53:29.728305 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkpp7\" (UniqueName: \"kubernetes.io/projected/482d67c1-02c6-4526-99fb-2bc546471c4d-kube-api-access-mkpp7\") on node \"crc\" DevicePath \"\"" Dec 11 08:53:29 crc kubenswrapper[4881]: I1211 08:53:29.728374 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/482d67c1-02c6-4526-99fb-2bc546471c4d-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 08:53:29 crc kubenswrapper[4881]: I1211 08:53:29.728388 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/482d67c1-02c6-4526-99fb-2bc546471c4d-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.031123 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" event={"ID":"482d67c1-02c6-4526-99fb-2bc546471c4d","Type":"ContainerDied","Data":"8af917258d83e33b92d50208277384bcac041186f868befc1b1f5d8bcbe30389"} Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.031231 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8af917258d83e33b92d50208277384bcac041186f868befc1b1f5d8bcbe30389" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.031160 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-p978t" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.121957 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p"] Dec 11 08:53:30 crc kubenswrapper[4881]: E1211 08:53:30.123660 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="482d67c1-02c6-4526-99fb-2bc546471c4d" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.123756 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="482d67c1-02c6-4526-99fb-2bc546471c4d" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.124295 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="482d67c1-02c6-4526-99fb-2bc546471c4d" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.125916 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.128819 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.129237 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.130226 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.130753 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.133706 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p"] Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.240861 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/97426780-cfa1-43ea-9cba-e4268c17b4c3-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-98j7p\" (UID: \"97426780-cfa1-43ea-9cba-e4268c17b4c3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.240913 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/97426780-cfa1-43ea-9cba-e4268c17b4c3-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-98j7p\" (UID: \"97426780-cfa1-43ea-9cba-e4268c17b4c3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.241074 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dx5xn\" (UniqueName: \"kubernetes.io/projected/97426780-cfa1-43ea-9cba-e4268c17b4c3-kube-api-access-dx5xn\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-98j7p\" (UID: \"97426780-cfa1-43ea-9cba-e4268c17b4c3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.343873 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/97426780-cfa1-43ea-9cba-e4268c17b4c3-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-98j7p\" (UID: \"97426780-cfa1-43ea-9cba-e4268c17b4c3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.343948 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/97426780-cfa1-43ea-9cba-e4268c17b4c3-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-98j7p\" (UID: \"97426780-cfa1-43ea-9cba-e4268c17b4c3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.344210 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dx5xn\" (UniqueName: \"kubernetes.io/projected/97426780-cfa1-43ea-9cba-e4268c17b4c3-kube-api-access-dx5xn\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-98j7p\" 
(UID: \"97426780-cfa1-43ea-9cba-e4268c17b4c3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.347884 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/97426780-cfa1-43ea-9cba-e4268c17b4c3-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-98j7p\" (UID: \"97426780-cfa1-43ea-9cba-e4268c17b4c3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.348030 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/97426780-cfa1-43ea-9cba-e4268c17b4c3-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-98j7p\" (UID: \"97426780-cfa1-43ea-9cba-e4268c17b4c3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.366155 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dx5xn\" (UniqueName: \"kubernetes.io/projected/97426780-cfa1-43ea-9cba-e4268c17b4c3-kube-api-access-dx5xn\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-98j7p\" (UID: \"97426780-cfa1-43ea-9cba-e4268c17b4c3\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" Dec 11 08:53:30 crc kubenswrapper[4881]: I1211 08:53:30.516216 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" Dec 11 08:53:31 crc kubenswrapper[4881]: I1211 08:53:31.095675 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p"] Dec 11 08:53:32 crc kubenswrapper[4881]: I1211 08:53:32.050804 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" event={"ID":"97426780-cfa1-43ea-9cba-e4268c17b4c3","Type":"ContainerStarted","Data":"3a0eeaa4d214fbef55a24a5eae2b8f9a1b14a92b2b8928f5079fc996118ff629"} Dec 11 08:53:33 crc kubenswrapper[4881]: I1211 08:53:33.068445 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" event={"ID":"97426780-cfa1-43ea-9cba-e4268c17b4c3","Type":"ContainerStarted","Data":"1c579fd3a0104c33911d07ebc0e122efeb1970aa55f6c3f6424da20bc8543223"} Dec 11 08:53:33 crc kubenswrapper[4881]: I1211 08:53:33.094297 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" podStartSLOduration=2.428541491 podStartE2EDuration="3.094268339s" podCreationTimestamp="2025-12-11 08:53:30 +0000 UTC" firstStartedPulling="2025-12-11 08:53:31.102701513 +0000 UTC m=+2259.480070210" lastFinishedPulling="2025-12-11 08:53:31.768428361 +0000 UTC m=+2260.145797058" observedRunningTime="2025-12-11 08:53:33.081014364 +0000 UTC m=+2261.458383061" watchObservedRunningTime="2025-12-11 08:53:33.094268339 +0000 UTC m=+2261.471637036" Dec 11 08:53:45 crc kubenswrapper[4881]: I1211 08:53:45.026275 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kjpvq"] Dec 11 08:53:45 crc kubenswrapper[4881]: I1211 08:53:45.030205 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kjpvq" Dec 11 08:53:45 crc kubenswrapper[4881]: I1211 08:53:45.036859 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kjpvq"] Dec 11 08:53:45 crc kubenswrapper[4881]: I1211 08:53:45.088792 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cd4xw\" (UniqueName: \"kubernetes.io/projected/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-kube-api-access-cd4xw\") pod \"certified-operators-kjpvq\" (UID: \"a34f667a-b2a1-4dcb-865e-e88a0fd2985e\") " pod="openshift-marketplace/certified-operators-kjpvq" Dec 11 08:53:45 crc kubenswrapper[4881]: I1211 08:53:45.089548 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-catalog-content\") pod \"certified-operators-kjpvq\" (UID: \"a34f667a-b2a1-4dcb-865e-e88a0fd2985e\") " pod="openshift-marketplace/certified-operators-kjpvq" Dec 11 08:53:45 crc kubenswrapper[4881]: I1211 08:53:45.090069 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-utilities\") pod \"certified-operators-kjpvq\" (UID: \"a34f667a-b2a1-4dcb-865e-e88a0fd2985e\") " pod="openshift-marketplace/certified-operators-kjpvq" Dec 11 08:53:45 crc kubenswrapper[4881]: I1211 08:53:45.192210 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-catalog-content\") pod \"certified-operators-kjpvq\" (UID: \"a34f667a-b2a1-4dcb-865e-e88a0fd2985e\") " pod="openshift-marketplace/certified-operators-kjpvq" Dec 11 08:53:45 crc kubenswrapper[4881]: I1211 08:53:45.192414 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-utilities\") pod \"certified-operators-kjpvq\" (UID: \"a34f667a-b2a1-4dcb-865e-e88a0fd2985e\") " pod="openshift-marketplace/certified-operators-kjpvq" Dec 11 08:53:45 crc kubenswrapper[4881]: I1211 08:53:45.192516 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cd4xw\" (UniqueName: \"kubernetes.io/projected/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-kube-api-access-cd4xw\") pod \"certified-operators-kjpvq\" (UID: \"a34f667a-b2a1-4dcb-865e-e88a0fd2985e\") " pod="openshift-marketplace/certified-operators-kjpvq" Dec 11 08:53:45 crc kubenswrapper[4881]: I1211 08:53:45.192801 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-catalog-content\") pod \"certified-operators-kjpvq\" (UID: \"a34f667a-b2a1-4dcb-865e-e88a0fd2985e\") " pod="openshift-marketplace/certified-operators-kjpvq" Dec 11 08:53:45 crc kubenswrapper[4881]: I1211 08:53:45.192916 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-utilities\") pod \"certified-operators-kjpvq\" (UID: \"a34f667a-b2a1-4dcb-865e-e88a0fd2985e\") " pod="openshift-marketplace/certified-operators-kjpvq" Dec 11 08:53:45 crc kubenswrapper[4881]: I1211 08:53:45.219449 4881 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-cd4xw\" (UniqueName: \"kubernetes.io/projected/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-kube-api-access-cd4xw\") pod \"certified-operators-kjpvq\" (UID: \"a34f667a-b2a1-4dcb-865e-e88a0fd2985e\") " pod="openshift-marketplace/certified-operators-kjpvq" Dec 11 08:53:45 crc kubenswrapper[4881]: I1211 08:53:45.363628 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kjpvq" Dec 11 08:53:45 crc kubenswrapper[4881]: I1211 08:53:45.929149 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kjpvq"] Dec 11 08:53:46 crc kubenswrapper[4881]: I1211 08:53:46.195896 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kjpvq" event={"ID":"a34f667a-b2a1-4dcb-865e-e88a0fd2985e","Type":"ContainerStarted","Data":"6e5ab13efb04e28e12c34a7bd2df902308b0568c9d0ce6dab58c537dba41a043"} Dec 11 08:53:48 crc kubenswrapper[4881]: I1211 08:53:48.221526 4881 generic.go:334] "Generic (PLEG): container finished" podID="a34f667a-b2a1-4dcb-865e-e88a0fd2985e" containerID="50e31bab43851eab903aacf067313fcad8a0086c585973583eb3d17ed380b709" exitCode=0 Dec 11 08:53:48 crc kubenswrapper[4881]: I1211 08:53:48.222111 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kjpvq" event={"ID":"a34f667a-b2a1-4dcb-865e-e88a0fd2985e","Type":"ContainerDied","Data":"50e31bab43851eab903aacf067313fcad8a0086c585973583eb3d17ed380b709"} Dec 11 08:53:48 crc kubenswrapper[4881]: I1211 08:53:48.234987 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 08:53:50 crc kubenswrapper[4881]: I1211 08:53:50.243749 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kjpvq" event={"ID":"a34f667a-b2a1-4dcb-865e-e88a0fd2985e","Type":"ContainerStarted","Data":"75437796ec84f7d3c720a61fd117211283087a2611ae662aefc9c291a59de970"} Dec 11 08:53:52 crc kubenswrapper[4881]: I1211 08:53:52.880848 4881 patch_prober.go:28] interesting pod/nmstate-webhook-f8fb84555-mfvb7 container/nmstate-webhook namespace/openshift-nmstate: Readiness probe status=failure output="Get \"https://10.217.0.88:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 11 08:53:52 crc kubenswrapper[4881]: I1211 08:53:52.881429 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-webhook-f8fb84555-mfvb7" podUID="dbbccd2c-ccc0-4501-b4b4-b85621051f5f" containerName="nmstate-webhook" probeResult="failure" output="Get \"https://10.217.0.88:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 11 08:53:53 crc kubenswrapper[4881]: I1211 08:53:53.277462 4881 generic.go:334] "Generic (PLEG): container finished" podID="a34f667a-b2a1-4dcb-865e-e88a0fd2985e" containerID="75437796ec84f7d3c720a61fd117211283087a2611ae662aefc9c291a59de970" exitCode=0 Dec 11 08:53:53 crc kubenswrapper[4881]: I1211 08:53:53.277524 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kjpvq" event={"ID":"a34f667a-b2a1-4dcb-865e-e88a0fd2985e","Type":"ContainerDied","Data":"75437796ec84f7d3c720a61fd117211283087a2611ae662aefc9c291a59de970"} Dec 11 08:53:54 crc kubenswrapper[4881]: I1211 08:53:54.292020 4881 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-marketplace/certified-operators-kjpvq" event={"ID":"a34f667a-b2a1-4dcb-865e-e88a0fd2985e","Type":"ContainerStarted","Data":"ba80b510d8e6805aa78f0205e77f978a9e7c40cf45d67e093babcdd607de2a8b"} Dec 11 08:53:54 crc kubenswrapper[4881]: I1211 08:53:54.319108 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kjpvq" podStartSLOduration=4.686807609 podStartE2EDuration="10.319084028s" podCreationTimestamp="2025-12-11 08:53:44 +0000 UTC" firstStartedPulling="2025-12-11 08:53:48.234681524 +0000 UTC m=+2276.612050221" lastFinishedPulling="2025-12-11 08:53:53.866957943 +0000 UTC m=+2282.244326640" observedRunningTime="2025-12-11 08:53:54.309753199 +0000 UTC m=+2282.687121896" watchObservedRunningTime="2025-12-11 08:53:54.319084028 +0000 UTC m=+2282.696452725" Dec 11 08:53:55 crc kubenswrapper[4881]: I1211 08:53:55.364743 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kjpvq" Dec 11 08:53:55 crc kubenswrapper[4881]: I1211 08:53:55.365256 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kjpvq" Dec 11 08:53:56 crc kubenswrapper[4881]: I1211 08:53:56.418569 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-kjpvq" podUID="a34f667a-b2a1-4dcb-865e-e88a0fd2985e" containerName="registry-server" probeResult="failure" output=< Dec 11 08:53:56 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 08:53:56 crc kubenswrapper[4881]: > Dec 11 08:54:05 crc kubenswrapper[4881]: I1211 08:54:05.412900 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kjpvq" Dec 11 08:54:05 crc kubenswrapper[4881]: I1211 08:54:05.470207 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kjpvq" Dec 11 08:54:05 crc kubenswrapper[4881]: I1211 08:54:05.651409 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kjpvq"] Dec 11 08:54:07 crc kubenswrapper[4881]: I1211 08:54:07.444320 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kjpvq" podUID="a34f667a-b2a1-4dcb-865e-e88a0fd2985e" containerName="registry-server" containerID="cri-o://ba80b510d8e6805aa78f0205e77f978a9e7c40cf45d67e093babcdd607de2a8b" gracePeriod=2 Dec 11 08:54:08 crc kubenswrapper[4881]: I1211 08:54:08.464880 4881 generic.go:334] "Generic (PLEG): container finished" podID="a34f667a-b2a1-4dcb-865e-e88a0fd2985e" containerID="ba80b510d8e6805aa78f0205e77f978a9e7c40cf45d67e093babcdd607de2a8b" exitCode=0 Dec 11 08:54:08 crc kubenswrapper[4881]: I1211 08:54:08.465148 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kjpvq" event={"ID":"a34f667a-b2a1-4dcb-865e-e88a0fd2985e","Type":"ContainerDied","Data":"ba80b510d8e6805aa78f0205e77f978a9e7c40cf45d67e093babcdd607de2a8b"} Dec 11 08:54:08 crc kubenswrapper[4881]: I1211 08:54:08.467378 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kjpvq" event={"ID":"a34f667a-b2a1-4dcb-865e-e88a0fd2985e","Type":"ContainerDied","Data":"6e5ab13efb04e28e12c34a7bd2df902308b0568c9d0ce6dab58c537dba41a043"} Dec 11 08:54:08 crc kubenswrapper[4881]: I1211 08:54:08.467411 4881 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6e5ab13efb04e28e12c34a7bd2df902308b0568c9d0ce6dab58c537dba41a043" Dec 11 08:54:08 crc kubenswrapper[4881]: I1211 08:54:08.496588 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kjpvq" Dec 11 08:54:08 crc kubenswrapper[4881]: I1211 08:54:08.645402 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-catalog-content\") pod \"a34f667a-b2a1-4dcb-865e-e88a0fd2985e\" (UID: \"a34f667a-b2a1-4dcb-865e-e88a0fd2985e\") " Dec 11 08:54:08 crc kubenswrapper[4881]: I1211 08:54:08.645581 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cd4xw\" (UniqueName: \"kubernetes.io/projected/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-kube-api-access-cd4xw\") pod \"a34f667a-b2a1-4dcb-865e-e88a0fd2985e\" (UID: \"a34f667a-b2a1-4dcb-865e-e88a0fd2985e\") " Dec 11 08:54:08 crc kubenswrapper[4881]: I1211 08:54:08.645660 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-utilities\") pod \"a34f667a-b2a1-4dcb-865e-e88a0fd2985e\" (UID: \"a34f667a-b2a1-4dcb-865e-e88a0fd2985e\") " Dec 11 08:54:08 crc kubenswrapper[4881]: I1211 08:54:08.646655 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-utilities" (OuterVolumeSpecName: "utilities") pod "a34f667a-b2a1-4dcb-865e-e88a0fd2985e" (UID: "a34f667a-b2a1-4dcb-865e-e88a0fd2985e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:54:08 crc kubenswrapper[4881]: I1211 08:54:08.651497 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-kube-api-access-cd4xw" (OuterVolumeSpecName: "kube-api-access-cd4xw") pod "a34f667a-b2a1-4dcb-865e-e88a0fd2985e" (UID: "a34f667a-b2a1-4dcb-865e-e88a0fd2985e"). InnerVolumeSpecName "kube-api-access-cd4xw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:54:08 crc kubenswrapper[4881]: I1211 08:54:08.698800 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a34f667a-b2a1-4dcb-865e-e88a0fd2985e" (UID: "a34f667a-b2a1-4dcb-865e-e88a0fd2985e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:54:08 crc kubenswrapper[4881]: I1211 08:54:08.748446 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cd4xw\" (UniqueName: \"kubernetes.io/projected/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-kube-api-access-cd4xw\") on node \"crc\" DevicePath \"\"" Dec 11 08:54:08 crc kubenswrapper[4881]: I1211 08:54:08.748551 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:54:08 crc kubenswrapper[4881]: I1211 08:54:08.748569 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a34f667a-b2a1-4dcb-865e-e88a0fd2985e-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:54:09 crc kubenswrapper[4881]: I1211 08:54:09.063283 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-mx8mp"] Dec 11 08:54:09 crc kubenswrapper[4881]: I1211 08:54:09.077830 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-mx8mp"] Dec 11 08:54:09 crc kubenswrapper[4881]: I1211 08:54:09.476250 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kjpvq" Dec 11 08:54:09 crc kubenswrapper[4881]: I1211 08:54:09.501476 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kjpvq"] Dec 11 08:54:09 crc kubenswrapper[4881]: I1211 08:54:09.513546 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kjpvq"] Dec 11 08:54:11 crc kubenswrapper[4881]: I1211 08:54:11.019102 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a34f667a-b2a1-4dcb-865e-e88a0fd2985e" path="/var/lib/kubelet/pods/a34f667a-b2a1-4dcb-865e-e88a0fd2985e/volumes" Dec 11 08:54:11 crc kubenswrapper[4881]: I1211 08:54:11.021328 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6950faa-9468-4d23-9834-73fb64506367" path="/var/lib/kubelet/pods/f6950faa-9468-4d23-9834-73fb64506367/volumes" Dec 11 08:54:21 crc kubenswrapper[4881]: I1211 08:54:21.621296 4881 generic.go:334] "Generic (PLEG): container finished" podID="97426780-cfa1-43ea-9cba-e4268c17b4c3" containerID="1c579fd3a0104c33911d07ebc0e122efeb1970aa55f6c3f6424da20bc8543223" exitCode=0 Dec 11 08:54:21 crc kubenswrapper[4881]: I1211 08:54:21.621367 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" event={"ID":"97426780-cfa1-43ea-9cba-e4268c17b4c3","Type":"ContainerDied","Data":"1c579fd3a0104c33911d07ebc0e122efeb1970aa55f6c3f6424da20bc8543223"} Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.065005 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.215527 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dx5xn\" (UniqueName: \"kubernetes.io/projected/97426780-cfa1-43ea-9cba-e4268c17b4c3-kube-api-access-dx5xn\") pod \"97426780-cfa1-43ea-9cba-e4268c17b4c3\" (UID: \"97426780-cfa1-43ea-9cba-e4268c17b4c3\") " Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.215652 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/97426780-cfa1-43ea-9cba-e4268c17b4c3-inventory\") pod \"97426780-cfa1-43ea-9cba-e4268c17b4c3\" (UID: \"97426780-cfa1-43ea-9cba-e4268c17b4c3\") " Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.215868 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/97426780-cfa1-43ea-9cba-e4268c17b4c3-ssh-key\") pod \"97426780-cfa1-43ea-9cba-e4268c17b4c3\" (UID: \"97426780-cfa1-43ea-9cba-e4268c17b4c3\") " Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.221124 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97426780-cfa1-43ea-9cba-e4268c17b4c3-kube-api-access-dx5xn" (OuterVolumeSpecName: "kube-api-access-dx5xn") pod "97426780-cfa1-43ea-9cba-e4268c17b4c3" (UID: "97426780-cfa1-43ea-9cba-e4268c17b4c3"). InnerVolumeSpecName "kube-api-access-dx5xn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.248413 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97426780-cfa1-43ea-9cba-e4268c17b4c3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "97426780-cfa1-43ea-9cba-e4268c17b4c3" (UID: "97426780-cfa1-43ea-9cba-e4268c17b4c3"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.253859 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97426780-cfa1-43ea-9cba-e4268c17b4c3-inventory" (OuterVolumeSpecName: "inventory") pod "97426780-cfa1-43ea-9cba-e4268c17b4c3" (UID: "97426780-cfa1-43ea-9cba-e4268c17b4c3"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.318761 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dx5xn\" (UniqueName: \"kubernetes.io/projected/97426780-cfa1-43ea-9cba-e4268c17b4c3-kube-api-access-dx5xn\") on node \"crc\" DevicePath \"\"" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.319040 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/97426780-cfa1-43ea-9cba-e4268c17b4c3-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.319051 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/97426780-cfa1-43ea-9cba-e4268c17b4c3-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.643167 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" event={"ID":"97426780-cfa1-43ea-9cba-e4268c17b4c3","Type":"ContainerDied","Data":"3a0eeaa4d214fbef55a24a5eae2b8f9a1b14a92b2b8928f5079fc996118ff629"} Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.643227 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3a0eeaa4d214fbef55a24a5eae2b8f9a1b14a92b2b8928f5079fc996118ff629" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.643505 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-98j7p" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.857218 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-f79qk"] Dec 11 08:54:23 crc kubenswrapper[4881]: E1211 08:54:23.857704 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a34f667a-b2a1-4dcb-865e-e88a0fd2985e" containerName="extract-utilities" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.857721 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="a34f667a-b2a1-4dcb-865e-e88a0fd2985e" containerName="extract-utilities" Dec 11 08:54:23 crc kubenswrapper[4881]: E1211 08:54:23.857740 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97426780-cfa1-43ea-9cba-e4268c17b4c3" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.857747 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="97426780-cfa1-43ea-9cba-e4268c17b4c3" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Dec 11 08:54:23 crc kubenswrapper[4881]: E1211 08:54:23.857762 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a34f667a-b2a1-4dcb-865e-e88a0fd2985e" containerName="registry-server" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.857769 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="a34f667a-b2a1-4dcb-865e-e88a0fd2985e" containerName="registry-server" Dec 11 08:54:23 crc kubenswrapper[4881]: E1211 08:54:23.857783 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a34f667a-b2a1-4dcb-865e-e88a0fd2985e" containerName="extract-content" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.857788 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="a34f667a-b2a1-4dcb-865e-e88a0fd2985e" containerName="extract-content" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.858010 4881 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="a34f667a-b2a1-4dcb-865e-e88a0fd2985e" containerName="registry-server" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.858033 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="97426780-cfa1-43ea-9cba-e4268c17b4c3" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.858858 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.862144 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.862529 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.862676 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.869569 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 08:54:23 crc kubenswrapper[4881]: I1211 08:54:23.881532 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-f79qk"] Dec 11 08:54:24 crc kubenswrapper[4881]: I1211 08:54:24.041571 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1e3366c8-0354-47fb-af1a-f579ed757f2b-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-f79qk\" (UID: \"1e3366c8-0354-47fb-af1a-f579ed757f2b\") " pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" Dec 11 08:54:24 crc kubenswrapper[4881]: I1211 08:54:24.041661 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkpdk\" (UniqueName: \"kubernetes.io/projected/1e3366c8-0354-47fb-af1a-f579ed757f2b-kube-api-access-rkpdk\") pod \"ssh-known-hosts-edpm-deployment-f79qk\" (UID: \"1e3366c8-0354-47fb-af1a-f579ed757f2b\") " pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" Dec 11 08:54:24 crc kubenswrapper[4881]: I1211 08:54:24.041718 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1e3366c8-0354-47fb-af1a-f579ed757f2b-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-f79qk\" (UID: \"1e3366c8-0354-47fb-af1a-f579ed757f2b\") " pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" Dec 11 08:54:24 crc kubenswrapper[4881]: I1211 08:54:24.143528 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkpdk\" (UniqueName: \"kubernetes.io/projected/1e3366c8-0354-47fb-af1a-f579ed757f2b-kube-api-access-rkpdk\") pod \"ssh-known-hosts-edpm-deployment-f79qk\" (UID: \"1e3366c8-0354-47fb-af1a-f579ed757f2b\") " pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" Dec 11 08:54:24 crc kubenswrapper[4881]: I1211 08:54:24.144602 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1e3366c8-0354-47fb-af1a-f579ed757f2b-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-f79qk\" (UID: \"1e3366c8-0354-47fb-af1a-f579ed757f2b\") " 
pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" Dec 11 08:54:24 crc kubenswrapper[4881]: I1211 08:54:24.145633 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1e3366c8-0354-47fb-af1a-f579ed757f2b-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-f79qk\" (UID: \"1e3366c8-0354-47fb-af1a-f579ed757f2b\") " pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" Dec 11 08:54:24 crc kubenswrapper[4881]: I1211 08:54:24.148822 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1e3366c8-0354-47fb-af1a-f579ed757f2b-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-f79qk\" (UID: \"1e3366c8-0354-47fb-af1a-f579ed757f2b\") " pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" Dec 11 08:54:24 crc kubenswrapper[4881]: I1211 08:54:24.150027 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1e3366c8-0354-47fb-af1a-f579ed757f2b-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-f79qk\" (UID: \"1e3366c8-0354-47fb-af1a-f579ed757f2b\") " pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" Dec 11 08:54:24 crc kubenswrapper[4881]: I1211 08:54:24.161882 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkpdk\" (UniqueName: \"kubernetes.io/projected/1e3366c8-0354-47fb-af1a-f579ed757f2b-kube-api-access-rkpdk\") pod \"ssh-known-hosts-edpm-deployment-f79qk\" (UID: \"1e3366c8-0354-47fb-af1a-f579ed757f2b\") " pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" Dec 11 08:54:24 crc kubenswrapper[4881]: I1211 08:54:24.185505 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" Dec 11 08:54:24 crc kubenswrapper[4881]: I1211 08:54:24.751729 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-f79qk"] Dec 11 08:54:25 crc kubenswrapper[4881]: I1211 08:54:25.669227 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" event={"ID":"1e3366c8-0354-47fb-af1a-f579ed757f2b","Type":"ContainerStarted","Data":"9bd0ff93d501c04c499ac78d65e1dab8d0c39e960fa9441519d6f88d5a4a3e71"} Dec 11 08:54:28 crc kubenswrapper[4881]: I1211 08:54:28.704092 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" event={"ID":"1e3366c8-0354-47fb-af1a-f579ed757f2b","Type":"ContainerStarted","Data":"ad0637977921cdc6fe87e49ff5c907342e9809a2ac5728545d49887d2f07fcf5"} Dec 11 08:54:28 crc kubenswrapper[4881]: I1211 08:54:28.725698 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" podStartSLOduration=3.1423757979999998 podStartE2EDuration="5.725676013s" podCreationTimestamp="2025-12-11 08:54:23 +0000 UTC" firstStartedPulling="2025-12-11 08:54:24.759525887 +0000 UTC m=+2313.136894584" lastFinishedPulling="2025-12-11 08:54:27.342826102 +0000 UTC m=+2315.720194799" observedRunningTime="2025-12-11 08:54:28.72030644 +0000 UTC m=+2317.097675147" watchObservedRunningTime="2025-12-11 08:54:28.725676013 +0000 UTC m=+2317.103044710" Dec 11 08:54:29 crc kubenswrapper[4881]: I1211 08:54:29.397403 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:54:29 crc kubenswrapper[4881]: I1211 08:54:29.397483 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:54:34 crc kubenswrapper[4881]: I1211 08:54:34.766292 4881 generic.go:334] "Generic (PLEG): container finished" podID="1e3366c8-0354-47fb-af1a-f579ed757f2b" containerID="ad0637977921cdc6fe87e49ff5c907342e9809a2ac5728545d49887d2f07fcf5" exitCode=0 Dec 11 08:54:34 crc kubenswrapper[4881]: I1211 08:54:34.766451 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" event={"ID":"1e3366c8-0354-47fb-af1a-f579ed757f2b","Type":"ContainerDied","Data":"ad0637977921cdc6fe87e49ff5c907342e9809a2ac5728545d49887d2f07fcf5"} Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.252464 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.284406 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1e3366c8-0354-47fb-af1a-f579ed757f2b-inventory-0\") pod \"1e3366c8-0354-47fb-af1a-f579ed757f2b\" (UID: \"1e3366c8-0354-47fb-af1a-f579ed757f2b\") " Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.284679 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rkpdk\" (UniqueName: \"kubernetes.io/projected/1e3366c8-0354-47fb-af1a-f579ed757f2b-kube-api-access-rkpdk\") pod \"1e3366c8-0354-47fb-af1a-f579ed757f2b\" (UID: \"1e3366c8-0354-47fb-af1a-f579ed757f2b\") " Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.284852 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1e3366c8-0354-47fb-af1a-f579ed757f2b-ssh-key-openstack-edpm-ipam\") pod \"1e3366c8-0354-47fb-af1a-f579ed757f2b\" (UID: \"1e3366c8-0354-47fb-af1a-f579ed757f2b\") " Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.291969 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e3366c8-0354-47fb-af1a-f579ed757f2b-kube-api-access-rkpdk" (OuterVolumeSpecName: "kube-api-access-rkpdk") pod "1e3366c8-0354-47fb-af1a-f579ed757f2b" (UID: "1e3366c8-0354-47fb-af1a-f579ed757f2b"). InnerVolumeSpecName "kube-api-access-rkpdk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.323676 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e3366c8-0354-47fb-af1a-f579ed757f2b-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "1e3366c8-0354-47fb-af1a-f579ed757f2b" (UID: "1e3366c8-0354-47fb-af1a-f579ed757f2b"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.324170 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e3366c8-0354-47fb-af1a-f579ed757f2b-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "1e3366c8-0354-47fb-af1a-f579ed757f2b" (UID: "1e3366c8-0354-47fb-af1a-f579ed757f2b"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.387427 4881 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1e3366c8-0354-47fb-af1a-f579ed757f2b-inventory-0\") on node \"crc\" DevicePath \"\"" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.387467 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rkpdk\" (UniqueName: \"kubernetes.io/projected/1e3366c8-0354-47fb-af1a-f579ed757f2b-kube-api-access-rkpdk\") on node \"crc\" DevicePath \"\"" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.387489 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1e3366c8-0354-47fb-af1a-f579ed757f2b-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.786598 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" event={"ID":"1e3366c8-0354-47fb-af1a-f579ed757f2b","Type":"ContainerDied","Data":"9bd0ff93d501c04c499ac78d65e1dab8d0c39e960fa9441519d6f88d5a4a3e71"} Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.786635 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9bd0ff93d501c04c499ac78d65e1dab8d0c39e960fa9441519d6f88d5a4a3e71" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.786687 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-f79qk" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.874005 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2"] Dec 11 08:54:36 crc kubenswrapper[4881]: E1211 08:54:36.874706 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e3366c8-0354-47fb-af1a-f579ed757f2b" containerName="ssh-known-hosts-edpm-deployment" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.874728 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e3366c8-0354-47fb-af1a-f579ed757f2b" containerName="ssh-known-hosts-edpm-deployment" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.875081 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e3366c8-0354-47fb-af1a-f579ed757f2b" containerName="ssh-known-hosts-edpm-deployment" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.876158 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.878583 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.879032 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.879211 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.881727 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.888943 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2"] Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.998309 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0b72500e-98a3-4e2a-895b-422da6f81a8c-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-xjxv2\" (UID: \"0b72500e-98a3-4e2a-895b-422da6f81a8c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.998387 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0b72500e-98a3-4e2a-895b-422da6f81a8c-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-xjxv2\" (UID: \"0b72500e-98a3-4e2a-895b-422da6f81a8c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" Dec 11 08:54:36 crc kubenswrapper[4881]: I1211 08:54:36.998733 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cb7tq\" (UniqueName: \"kubernetes.io/projected/0b72500e-98a3-4e2a-895b-422da6f81a8c-kube-api-access-cb7tq\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-xjxv2\" (UID: \"0b72500e-98a3-4e2a-895b-422da6f81a8c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" Dec 11 08:54:37 crc kubenswrapper[4881]: I1211 08:54:37.101997 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cb7tq\" (UniqueName: \"kubernetes.io/projected/0b72500e-98a3-4e2a-895b-422da6f81a8c-kube-api-access-cb7tq\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-xjxv2\" (UID: \"0b72500e-98a3-4e2a-895b-422da6f81a8c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" Dec 11 08:54:37 crc kubenswrapper[4881]: I1211 08:54:37.102269 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0b72500e-98a3-4e2a-895b-422da6f81a8c-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-xjxv2\" (UID: \"0b72500e-98a3-4e2a-895b-422da6f81a8c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" Dec 11 08:54:37 crc kubenswrapper[4881]: I1211 08:54:37.102307 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0b72500e-98a3-4e2a-895b-422da6f81a8c-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-xjxv2\" (UID: \"0b72500e-98a3-4e2a-895b-422da6f81a8c\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" Dec 11 08:54:37 crc kubenswrapper[4881]: I1211 08:54:37.119210 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0b72500e-98a3-4e2a-895b-422da6f81a8c-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-xjxv2\" (UID: \"0b72500e-98a3-4e2a-895b-422da6f81a8c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" Dec 11 08:54:37 crc kubenswrapper[4881]: I1211 08:54:37.120907 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0b72500e-98a3-4e2a-895b-422da6f81a8c-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-xjxv2\" (UID: \"0b72500e-98a3-4e2a-895b-422da6f81a8c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" Dec 11 08:54:37 crc kubenswrapper[4881]: I1211 08:54:37.121417 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cb7tq\" (UniqueName: \"kubernetes.io/projected/0b72500e-98a3-4e2a-895b-422da6f81a8c-kube-api-access-cb7tq\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-xjxv2\" (UID: \"0b72500e-98a3-4e2a-895b-422da6f81a8c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" Dec 11 08:54:37 crc kubenswrapper[4881]: I1211 08:54:37.202359 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" Dec 11 08:54:37 crc kubenswrapper[4881]: I1211 08:54:37.810321 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2"] Dec 11 08:54:38 crc kubenswrapper[4881]: I1211 08:54:38.816277 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" event={"ID":"0b72500e-98a3-4e2a-895b-422da6f81a8c","Type":"ContainerStarted","Data":"5409e6966f2356ea52c66e10d28ab662f43a6675f9134f6c7cdc616530675f16"} Dec 11 08:54:49 crc kubenswrapper[4881]: I1211 08:54:49.966279 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" event={"ID":"0b72500e-98a3-4e2a-895b-422da6f81a8c","Type":"ContainerStarted","Data":"5e03a96d1629ebd5fb7587e5c08130d5c83fd9d4348bf19f8f6f3a6ba2f7a648"} Dec 11 08:54:50 crc kubenswrapper[4881]: I1211 08:54:50.003064 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" podStartSLOduration=3.016810315 podStartE2EDuration="14.003037298s" podCreationTimestamp="2025-12-11 08:54:36 +0000 UTC" firstStartedPulling="2025-12-11 08:54:37.809370588 +0000 UTC m=+2326.186739285" lastFinishedPulling="2025-12-11 08:54:48.795597571 +0000 UTC m=+2337.172966268" observedRunningTime="2025-12-11 08:54:49.987879164 +0000 UTC m=+2338.365247861" watchObservedRunningTime="2025-12-11 08:54:50.003037298 +0000 UTC m=+2338.380406015" Dec 11 08:54:58 crc kubenswrapper[4881]: I1211 08:54:58.053037 4881 generic.go:334] "Generic (PLEG): container finished" podID="0b72500e-98a3-4e2a-895b-422da6f81a8c" containerID="5e03a96d1629ebd5fb7587e5c08130d5c83fd9d4348bf19f8f6f3a6ba2f7a648" exitCode=0 Dec 11 08:54:58 crc kubenswrapper[4881]: I1211 08:54:58.053126 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" 
event={"ID":"0b72500e-98a3-4e2a-895b-422da6f81a8c","Type":"ContainerDied","Data":"5e03a96d1629ebd5fb7587e5c08130d5c83fd9d4348bf19f8f6f3a6ba2f7a648"} Dec 11 08:54:59 crc kubenswrapper[4881]: I1211 08:54:59.396796 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 08:54:59 crc kubenswrapper[4881]: I1211 08:54:59.397369 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 08:54:59 crc kubenswrapper[4881]: I1211 08:54:59.547414 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" Dec 11 08:54:59 crc kubenswrapper[4881]: I1211 08:54:59.732495 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cb7tq\" (UniqueName: \"kubernetes.io/projected/0b72500e-98a3-4e2a-895b-422da6f81a8c-kube-api-access-cb7tq\") pod \"0b72500e-98a3-4e2a-895b-422da6f81a8c\" (UID: \"0b72500e-98a3-4e2a-895b-422da6f81a8c\") " Dec 11 08:54:59 crc kubenswrapper[4881]: I1211 08:54:59.732822 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0b72500e-98a3-4e2a-895b-422da6f81a8c-ssh-key\") pod \"0b72500e-98a3-4e2a-895b-422da6f81a8c\" (UID: \"0b72500e-98a3-4e2a-895b-422da6f81a8c\") " Dec 11 08:54:59 crc kubenswrapper[4881]: I1211 08:54:59.732867 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0b72500e-98a3-4e2a-895b-422da6f81a8c-inventory\") pod \"0b72500e-98a3-4e2a-895b-422da6f81a8c\" (UID: \"0b72500e-98a3-4e2a-895b-422da6f81a8c\") " Dec 11 08:54:59 crc kubenswrapper[4881]: I1211 08:54:59.738580 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b72500e-98a3-4e2a-895b-422da6f81a8c-kube-api-access-cb7tq" (OuterVolumeSpecName: "kube-api-access-cb7tq") pod "0b72500e-98a3-4e2a-895b-422da6f81a8c" (UID: "0b72500e-98a3-4e2a-895b-422da6f81a8c"). InnerVolumeSpecName "kube-api-access-cb7tq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:54:59 crc kubenswrapper[4881]: I1211 08:54:59.767626 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b72500e-98a3-4e2a-895b-422da6f81a8c-inventory" (OuterVolumeSpecName: "inventory") pod "0b72500e-98a3-4e2a-895b-422da6f81a8c" (UID: "0b72500e-98a3-4e2a-895b-422da6f81a8c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:54:59 crc kubenswrapper[4881]: I1211 08:54:59.769715 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b72500e-98a3-4e2a-895b-422da6f81a8c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0b72500e-98a3-4e2a-895b-422da6f81a8c" (UID: "0b72500e-98a3-4e2a-895b-422da6f81a8c"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:54:59 crc kubenswrapper[4881]: I1211 08:54:59.836492 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cb7tq\" (UniqueName: \"kubernetes.io/projected/0b72500e-98a3-4e2a-895b-422da6f81a8c-kube-api-access-cb7tq\") on node \"crc\" DevicePath \"\"" Dec 11 08:54:59 crc kubenswrapper[4881]: I1211 08:54:59.836527 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0b72500e-98a3-4e2a-895b-422da6f81a8c-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 08:54:59 crc kubenswrapper[4881]: I1211 08:54:59.836538 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0b72500e-98a3-4e2a-895b-422da6f81a8c-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.095397 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" event={"ID":"0b72500e-98a3-4e2a-895b-422da6f81a8c","Type":"ContainerDied","Data":"5409e6966f2356ea52c66e10d28ab662f43a6675f9134f6c7cdc616530675f16"} Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.095440 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5409e6966f2356ea52c66e10d28ab662f43a6675f9134f6c7cdc616530675f16" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.095525 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xjxv2" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.175767 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6"] Dec 11 08:55:00 crc kubenswrapper[4881]: E1211 08:55:00.176959 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b72500e-98a3-4e2a-895b-422da6f81a8c" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.176985 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b72500e-98a3-4e2a-895b-422da6f81a8c" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.177536 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b72500e-98a3-4e2a-895b-422da6f81a8c" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.178660 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.181418 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.181700 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.181968 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.182190 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.206529 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6"] Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.351395 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/55357646-b980-4023-b886-5365ec6fd85f-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6\" (UID: \"55357646-b980-4023-b886-5365ec6fd85f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.351477 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lqb8\" (UniqueName: \"kubernetes.io/projected/55357646-b980-4023-b886-5365ec6fd85f-kube-api-access-6lqb8\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6\" (UID: \"55357646-b980-4023-b886-5365ec6fd85f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.351535 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/55357646-b980-4023-b886-5365ec6fd85f-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6\" (UID: \"55357646-b980-4023-b886-5365ec6fd85f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.453848 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/55357646-b980-4023-b886-5365ec6fd85f-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6\" (UID: \"55357646-b980-4023-b886-5365ec6fd85f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.454106 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lqb8\" (UniqueName: \"kubernetes.io/projected/55357646-b980-4023-b886-5365ec6fd85f-kube-api-access-6lqb8\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6\" (UID: \"55357646-b980-4023-b886-5365ec6fd85f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.454200 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/55357646-b980-4023-b886-5365ec6fd85f-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6\" (UID: 
\"55357646-b980-4023-b886-5365ec6fd85f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.459329 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/55357646-b980-4023-b886-5365ec6fd85f-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6\" (UID: \"55357646-b980-4023-b886-5365ec6fd85f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.467517 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/55357646-b980-4023-b886-5365ec6fd85f-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6\" (UID: \"55357646-b980-4023-b886-5365ec6fd85f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.485456 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lqb8\" (UniqueName: \"kubernetes.io/projected/55357646-b980-4023-b886-5365ec6fd85f-kube-api-access-6lqb8\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6\" (UID: \"55357646-b980-4023-b886-5365ec6fd85f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" Dec 11 08:55:00 crc kubenswrapper[4881]: I1211 08:55:00.515532 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" Dec 11 08:55:01 crc kubenswrapper[4881]: I1211 08:55:01.093449 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6"] Dec 11 08:55:02 crc kubenswrapper[4881]: I1211 08:55:02.120851 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" event={"ID":"55357646-b980-4023-b886-5365ec6fd85f","Type":"ContainerStarted","Data":"c2feed6de0073bf86f957a9ee68c8f83fa5f8bb558598a1d51bfb26bbf9fc1ca"} Dec 11 08:55:03 crc kubenswrapper[4881]: I1211 08:55:03.134637 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" event={"ID":"55357646-b980-4023-b886-5365ec6fd85f","Type":"ContainerStarted","Data":"a853b6abbd6cf677b3291f4320986d3cfa600392b224bd74140ff267a9e3887a"} Dec 11 08:55:03 crc kubenswrapper[4881]: I1211 08:55:03.166994 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" podStartSLOduration=2.038465179 podStartE2EDuration="3.16696808s" podCreationTimestamp="2025-12-11 08:55:00 +0000 UTC" firstStartedPulling="2025-12-11 08:55:01.10029942 +0000 UTC m=+2349.477668117" lastFinishedPulling="2025-12-11 08:55:02.228802311 +0000 UTC m=+2350.606171018" observedRunningTime="2025-12-11 08:55:03.151263492 +0000 UTC m=+2351.528632179" watchObservedRunningTime="2025-12-11 08:55:03.16696808 +0000 UTC m=+2351.544336777" Dec 11 08:55:09 crc kubenswrapper[4881]: I1211 08:55:09.910317 4881 scope.go:117] "RemoveContainer" containerID="0fe41f4153d771d21a6b9388ac26e7f7d7f07050854a3d1f9c085687af6a8561" Dec 11 08:55:13 crc kubenswrapper[4881]: I1211 08:55:13.252886 4881 generic.go:334] "Generic (PLEG): container finished" podID="55357646-b980-4023-b886-5365ec6fd85f" containerID="a853b6abbd6cf677b3291f4320986d3cfa600392b224bd74140ff267a9e3887a" exitCode=0 Dec 11 08:55:13 crc 
kubenswrapper[4881]: I1211 08:55:13.253073 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" event={"ID":"55357646-b980-4023-b886-5365ec6fd85f","Type":"ContainerDied","Data":"a853b6abbd6cf677b3291f4320986d3cfa600392b224bd74140ff267a9e3887a"} Dec 11 08:55:14 crc kubenswrapper[4881]: I1211 08:55:14.739530 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" Dec 11 08:55:14 crc kubenswrapper[4881]: I1211 08:55:14.756426 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/55357646-b980-4023-b886-5365ec6fd85f-inventory\") pod \"55357646-b980-4023-b886-5365ec6fd85f\" (UID: \"55357646-b980-4023-b886-5365ec6fd85f\") " Dec 11 08:55:14 crc kubenswrapper[4881]: I1211 08:55:14.756496 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/55357646-b980-4023-b886-5365ec6fd85f-ssh-key\") pod \"55357646-b980-4023-b886-5365ec6fd85f\" (UID: \"55357646-b980-4023-b886-5365ec6fd85f\") " Dec 11 08:55:14 crc kubenswrapper[4881]: I1211 08:55:14.756629 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6lqb8\" (UniqueName: \"kubernetes.io/projected/55357646-b980-4023-b886-5365ec6fd85f-kube-api-access-6lqb8\") pod \"55357646-b980-4023-b886-5365ec6fd85f\" (UID: \"55357646-b980-4023-b886-5365ec6fd85f\") " Dec 11 08:55:14 crc kubenswrapper[4881]: I1211 08:55:14.764328 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55357646-b980-4023-b886-5365ec6fd85f-kube-api-access-6lqb8" (OuterVolumeSpecName: "kube-api-access-6lqb8") pod "55357646-b980-4023-b886-5365ec6fd85f" (UID: "55357646-b980-4023-b886-5365ec6fd85f"). InnerVolumeSpecName "kube-api-access-6lqb8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:55:14 crc kubenswrapper[4881]: I1211 08:55:14.792292 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55357646-b980-4023-b886-5365ec6fd85f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "55357646-b980-4023-b886-5365ec6fd85f" (UID: "55357646-b980-4023-b886-5365ec6fd85f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:55:14 crc kubenswrapper[4881]: I1211 08:55:14.798758 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55357646-b980-4023-b886-5365ec6fd85f-inventory" (OuterVolumeSpecName: "inventory") pod "55357646-b980-4023-b886-5365ec6fd85f" (UID: "55357646-b980-4023-b886-5365ec6fd85f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:55:14 crc kubenswrapper[4881]: I1211 08:55:14.859416 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6lqb8\" (UniqueName: \"kubernetes.io/projected/55357646-b980-4023-b886-5365ec6fd85f-kube-api-access-6lqb8\") on node \"crc\" DevicePath \"\"" Dec 11 08:55:14 crc kubenswrapper[4881]: I1211 08:55:14.859611 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/55357646-b980-4023-b886-5365ec6fd85f-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 08:55:14 crc kubenswrapper[4881]: I1211 08:55:14.859621 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/55357646-b980-4023-b886-5365ec6fd85f-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.273221 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" event={"ID":"55357646-b980-4023-b886-5365ec6fd85f","Type":"ContainerDied","Data":"c2feed6de0073bf86f957a9ee68c8f83fa5f8bb558598a1d51bfb26bbf9fc1ca"} Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.273262 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2feed6de0073bf86f957a9ee68c8f83fa5f8bb558598a1d51bfb26bbf9fc1ca" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.273279 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.350483 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"] Dec 11 08:55:15 crc kubenswrapper[4881]: E1211 08:55:15.351055 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55357646-b980-4023-b886-5365ec6fd85f" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.351079 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="55357646-b980-4023-b886-5365ec6fd85f" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.351647 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="55357646-b980-4023-b886-5365ec6fd85f" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.353052 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.357085 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.357110 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.357246 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.357315 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.357608 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.357879 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.358060 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.358916 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.360770 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.371721 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.371793 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.371842 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.371864 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.371927 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wfrt\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-kube-api-access-5wfrt\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.371950 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.371994 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.372044 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.372081 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.372099 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.372139 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-nova-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.372167 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.372183 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.372212 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.372255 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.372286 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.373256 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"] Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.477325 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.477390 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.477435 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.477495 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.477526 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.477586 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.477632 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.477665 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.477691 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-ovn-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.477743 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wfrt\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-kube-api-access-5wfrt\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.477773 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.477808 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.477872 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.477908 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.477933 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.477984 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.484150 4881 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.484862 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.485609 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.485977 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.486243 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.486302 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.487872 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.488704 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.488965 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.490914 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.493060 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.493930 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.494431 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.494563 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.494629 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.497955 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wfrt\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-kube-api-access-5wfrt\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-pvld9\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:55:15 crc kubenswrapper[4881]: I1211 08:55:15.776652 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:55:16 crc kubenswrapper[4881]: I1211 08:55:16.388097 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"]
Dec 11 08:55:16 crc kubenswrapper[4881]: W1211 08:55:16.388516 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4c5efad_5566_4a8d_85d8_c897f04fcb46.slice/crio-e662ccf9f01d01f0712d2dd80be51fdd4aee3073d50af4b26e31f32063946eb8 WatchSource:0}: Error finding container e662ccf9f01d01f0712d2dd80be51fdd4aee3073d50af4b26e31f32063946eb8: Status 404 returned error can't find the container with id e662ccf9f01d01f0712d2dd80be51fdd4aee3073d50af4b26e31f32063946eb8
Dec 11 08:55:17 crc kubenswrapper[4881]: I1211 08:55:17.299723 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" event={"ID":"a4c5efad-5566-4a8d-85d8-c897f04fcb46","Type":"ContainerStarted","Data":"ff1a8b1e225002b99055098a51c140960b551b9cbbf4b4880ef55b17fbca9c84"}
Dec 11 08:55:17 crc kubenswrapper[4881]: I1211 08:55:17.300070 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" event={"ID":"a4c5efad-5566-4a8d-85d8-c897f04fcb46","Type":"ContainerStarted","Data":"e662ccf9f01d01f0712d2dd80be51fdd4aee3073d50af4b26e31f32063946eb8"}
Dec 11 08:55:17 crc kubenswrapper[4881]: I1211 08:55:17.348440 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" podStartSLOduration=1.861529633 podStartE2EDuration="2.348417566s" podCreationTimestamp="2025-12-11 08:55:15 +0000 UTC" firstStartedPulling="2025-12-11 08:55:16.391961717 +0000 UTC m=+2364.769330414" lastFinishedPulling="2025-12-11 08:55:16.87884965 +0000 UTC m=+2365.256218347" observedRunningTime="2025-12-11 08:55:17.33316225 +0000 UTC m=+2365.710530957" watchObservedRunningTime="2025-12-11 08:55:17.348417566 +0000 UTC m=+2365.725786273"
Dec 11 08:55:20 crc kubenswrapper[4881]: I1211 08:55:20.043970 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-mn4r5"]
Dec 11 08:55:20 crc kubenswrapper[4881]: I1211 08:55:20.057401 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-mn4r5"]
Dec 11 08:55:21 crc kubenswrapper[4881]: I1211 08:55:21.024234 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb0fb1c6-fd5d-4334-9584-efb1c5c75d05" path="/var/lib/kubelet/pods/cb0fb1c6-fd5d-4334-9584-efb1c5c75d05/volumes"
Dec 11 08:55:29 crc kubenswrapper[4881]: I1211 08:55:29.397484 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 08:55:29 crc kubenswrapper[4881]: I1211 08:55:29.398086 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 08:55:29 crc kubenswrapper[4881]: I1211 08:55:29.398133 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh"
Dec 11 08:55:29 crc kubenswrapper[4881]: I1211 08:55:29.399306 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 11 08:55:29 crc kubenswrapper[4881]: I1211 08:55:29.399380 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2" gracePeriod=600
Dec 11 08:55:30 crc kubenswrapper[4881]: I1211 08:55:30.435849 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2" exitCode=0
Dec 11 08:55:30 crc kubenswrapper[4881]: I1211 08:55:30.435929 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2"}
Dec 11 08:55:30 crc kubenswrapper[4881]: I1211 08:55:30.436116 4881 scope.go:117] "RemoveContainer" containerID="73cf77c8e58dfead623c00357f3020e90cfa5d92429139db8dcd67259d7f3aee"
Dec 11 08:55:30 crc kubenswrapper[4881]: E1211 08:55:30.724421 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:55:31 crc kubenswrapper[4881]: I1211 08:55:31.447976 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2"
Dec 11 08:55:31 crc kubenswrapper[4881]: E1211 08:55:31.449319 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:55:47 crc kubenswrapper[4881]: I1211 08:55:47.005984 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2"
Dec 11 08:55:47 crc kubenswrapper[4881]: E1211 08:55:47.006960 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:55:58 crc kubenswrapper[4881]: I1211 08:55:58.005983 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2"
Dec 11 08:55:58 crc kubenswrapper[4881]: E1211 08:55:58.006679 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:56:02 crc kubenswrapper[4881]: I1211 08:56:02.780033 4881 generic.go:334] "Generic (PLEG): container finished" podID="a4c5efad-5566-4a8d-85d8-c897f04fcb46" containerID="ff1a8b1e225002b99055098a51c140960b551b9cbbf4b4880ef55b17fbca9c84" exitCode=0
Dec 11 08:56:02 crc kubenswrapper[4881]: I1211 08:56:02.780589 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" event={"ID":"a4c5efad-5566-4a8d-85d8-c897f04fcb46","Type":"ContainerDied","Data":"ff1a8b1e225002b99055098a51c140960b551b9cbbf4b4880ef55b17fbca9c84"}
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.275229 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.347117 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") "
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.347244 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-telemetry-power-monitoring-combined-ca-bundle\") pod \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") "
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.347273 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-ovn-default-certs-0\") pod \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") "
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.347302 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-bootstrap-combined-ca-bundle\") pod \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") "
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.347352 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") "
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.347400 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-inventory\") pod \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") "
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.347440 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-telemetry-combined-ca-bundle\") pod \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") "
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.347485 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-repo-setup-combined-ca-bundle\") pod \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") "
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.347570 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-nova-combined-ca-bundle\") pod \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") "
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.347595 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-ovn-combined-ca-bundle\") pod \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") "
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.347635 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") "
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.347669 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-libvirt-combined-ca-bundle\") pod \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") "
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.347694 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-neutron-metadata-combined-ca-bundle\") pod \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") "
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.347720 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wfrt\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-kube-api-access-5wfrt\") pod \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") "
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.347763 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-ssh-key\") pod \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") "
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.347788 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\" (UID: \"a4c5efad-5566-4a8d-85d8-c897f04fcb46\") "
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.358020 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "a4c5efad-5566-4a8d-85d8-c897f04fcb46" (UID: "a4c5efad-5566-4a8d-85d8-c897f04fcb46"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.358166 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0") pod "a4c5efad-5566-4a8d-85d8-c897f04fcb46" (UID: "a4c5efad-5566-4a8d-85d8-c897f04fcb46"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.361678 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "a4c5efad-5566-4a8d-85d8-c897f04fcb46" (UID: "a4c5efad-5566-4a8d-85d8-c897f04fcb46"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.361668 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "a4c5efad-5566-4a8d-85d8-c897f04fcb46" (UID: "a4c5efad-5566-4a8d-85d8-c897f04fcb46"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.361786 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "a4c5efad-5566-4a8d-85d8-c897f04fcb46" (UID: "a4c5efad-5566-4a8d-85d8-c897f04fcb46"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.366271 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "a4c5efad-5566-4a8d-85d8-c897f04fcb46" (UID: "a4c5efad-5566-4a8d-85d8-c897f04fcb46"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.366419 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "a4c5efad-5566-4a8d-85d8-c897f04fcb46" (UID: "a4c5efad-5566-4a8d-85d8-c897f04fcb46"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.367099 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "a4c5efad-5566-4a8d-85d8-c897f04fcb46" (UID: "a4c5efad-5566-4a8d-85d8-c897f04fcb46"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.367237 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-kube-api-access-5wfrt" (OuterVolumeSpecName: "kube-api-access-5wfrt") pod "a4c5efad-5566-4a8d-85d8-c897f04fcb46" (UID: "a4c5efad-5566-4a8d-85d8-c897f04fcb46"). InnerVolumeSpecName "kube-api-access-5wfrt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.368089 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "a4c5efad-5566-4a8d-85d8-c897f04fcb46" (UID: "a4c5efad-5566-4a8d-85d8-c897f04fcb46"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.379586 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "a4c5efad-5566-4a8d-85d8-c897f04fcb46" (UID: "a4c5efad-5566-4a8d-85d8-c897f04fcb46"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.380548 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "a4c5efad-5566-4a8d-85d8-c897f04fcb46" (UID: "a4c5efad-5566-4a8d-85d8-c897f04fcb46"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.387868 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "a4c5efad-5566-4a8d-85d8-c897f04fcb46" (UID: "a4c5efad-5566-4a8d-85d8-c897f04fcb46"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.388132 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "a4c5efad-5566-4a8d-85d8-c897f04fcb46" (UID: "a4c5efad-5566-4a8d-85d8-c897f04fcb46"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.425780 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-inventory" (OuterVolumeSpecName: "inventory") pod "a4c5efad-5566-4a8d-85d8-c897f04fcb46" (UID: "a4c5efad-5566-4a8d-85d8-c897f04fcb46"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.436386 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a4c5efad-5566-4a8d-85d8-c897f04fcb46" (UID: "a4c5efad-5566-4a8d-85d8-c897f04fcb46"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.450926 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.451226 4881 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.451366 4881 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") on node \"crc\" DevicePath \"\""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.451470 4881 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.451579 4881 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.451684 4881 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.451785 4881 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.451888 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-inventory\") on node \"crc\" DevicePath \"\""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.451978 4881 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.452079 4881 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.452171 4881 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.452274 4881 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.452384 4881 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.452471 4881 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.452548 4881 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4c5efad-5566-4a8d-85d8-c897f04fcb46-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.452621 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wfrt\" (UniqueName: \"kubernetes.io/projected/a4c5efad-5566-4a8d-85d8-c897f04fcb46-kube-api-access-5wfrt\") on node \"crc\" DevicePath \"\""
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.804177 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9" event={"ID":"a4c5efad-5566-4a8d-85d8-c897f04fcb46","Type":"ContainerDied","Data":"e662ccf9f01d01f0712d2dd80be51fdd4aee3073d50af4b26e31f32063946eb8"}
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.804508 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e662ccf9f01d01f0712d2dd80be51fdd4aee3073d50af4b26e31f32063946eb8"
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.804390 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-pvld9"
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.916105 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"]
Dec 11 08:56:04 crc kubenswrapper[4881]: E1211 08:56:04.916792 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4c5efad-5566-4a8d-85d8-c897f04fcb46" containerName="install-certs-edpm-deployment-openstack-edpm-ipam"
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.916821 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4c5efad-5566-4a8d-85d8-c897f04fcb46" containerName="install-certs-edpm-deployment-openstack-edpm-ipam"
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.917129 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4c5efad-5566-4a8d-85d8-c897f04fcb46" containerName="install-certs-edpm-deployment-openstack-edpm-ipam"
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.918278 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.924692 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.924932 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config"
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.924940 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.925000 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.925277 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72"
Dec 11 08:56:04 crc kubenswrapper[4881]: I1211 08:56:04.933105 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"]
Dec 11 08:56:05 crc kubenswrapper[4881]: I1211 08:56:05.065276 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/9b70ae00-542c-47d9-b985-5fc2433218a5-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-tvdms\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:56:05 crc kubenswrapper[4881]: I1211 08:56:05.065442 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-tvdms\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:56:05 crc kubenswrapper[4881]: I1211 08:56:05.065485 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-tvdms\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:56:05 crc kubenswrapper[4881]: I1211 08:56:05.065573 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-tvdms\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:56:05 crc kubenswrapper[4881]: I1211 08:56:05.065828 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-542wk\" (UniqueName: \"kubernetes.io/projected/9b70ae00-542c-47d9-b985-5fc2433218a5-kube-api-access-542wk\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-tvdms\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:56:05 crc kubenswrapper[4881]: I1211 08:56:05.168321 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/9b70ae00-542c-47d9-b985-5fc2433218a5-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-tvdms\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:56:05 crc kubenswrapper[4881]: I1211 08:56:05.168413 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-tvdms\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:56:05 crc kubenswrapper[4881]: I1211 08:56:05.168445 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-tvdms\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:56:05 crc kubenswrapper[4881]: I1211 08:56:05.168476 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-tvdms\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:56:05 crc kubenswrapper[4881]: I1211 08:56:05.168578 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-542wk\" (UniqueName: \"kubernetes.io/projected/9b70ae00-542c-47d9-b985-5fc2433218a5-kube-api-access-542wk\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-tvdms\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:56:05 crc kubenswrapper[4881]: I1211 08:56:05.169756 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/9b70ae00-542c-47d9-b985-5fc2433218a5-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-tvdms\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:56:05 crc kubenswrapper[4881]: I1211 08:56:05.172864 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-tvdms\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:56:05 crc kubenswrapper[4881]: I1211 08:56:05.173036 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-tvdms\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:56:05 crc kubenswrapper[4881]: I1211 08:56:05.173242 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-tvdms\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:56:05 crc kubenswrapper[4881]: I1211 08:56:05.191158 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-542wk\" (UniqueName: \"kubernetes.io/projected/9b70ae00-542c-47d9-b985-5fc2433218a5-kube-api-access-542wk\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-tvdms\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:56:05 crc kubenswrapper[4881]: I1211 08:56:05.240606 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:56:05 crc kubenswrapper[4881]: I1211 08:56:05.858875 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"]
Dec 11 08:56:06 crc kubenswrapper[4881]: I1211 08:56:06.839873 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms" event={"ID":"9b70ae00-542c-47d9-b985-5fc2433218a5","Type":"ContainerStarted","Data":"68ad1fd717024bbba417933b5e84581a82f574ae04aab6329f1894eed7ba8b2c"}
Dec 11 08:56:07 crc kubenswrapper[4881]: I1211 08:56:07.865251 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms" event={"ID":"9b70ae00-542c-47d9-b985-5fc2433218a5","Type":"ContainerStarted","Data":"eac344494217a8589ef02a6deb10b61fbcc50a3aa9814e11a69342e5a018c310"}
Dec 11 08:56:07 crc kubenswrapper[4881]: I1211 08:56:07.884359 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms" podStartSLOduration=3.155909298 podStartE2EDuration="3.884314965s" podCreationTimestamp="2025-12-11 08:56:04 +0000 UTC" firstStartedPulling="2025-12-11 08:56:05.867911296 +0000 UTC m=+2414.245280023" lastFinishedPulling="2025-12-11 08:56:06.596316993 +0000 UTC m=+2414.973685690" observedRunningTime="2025-12-11 08:56:07.879739641 +0000 UTC m=+2416.257108358" watchObservedRunningTime="2025-12-11 08:56:07.884314965 +0000 UTC m=+2416.261683672"
Dec 11 08:56:09 crc kubenswrapper[4881]: I1211 08:56:09.978080 4881 scope.go:117] "RemoveContainer" containerID="d11af08761a00ae5f85d754efd0273b40889750d8d3d535fadc9b6f1094dc844"
Dec 11 08:56:10 crc kubenswrapper[4881]: I1211 08:56:10.005305 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2"
Dec 11 08:56:10 crc kubenswrapper[4881]: E1211 08:56:10.005600 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:56:24 crc kubenswrapper[4881]: I1211 08:56:24.007567 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2"
Dec 11 08:56:24 crc kubenswrapper[4881]: E1211 08:56:24.011445 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:56:36 crc kubenswrapper[4881]: I1211 08:56:36.005452 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2"
Dec 11 08:56:36 crc kubenswrapper[4881]: E1211 08:56:36.006349 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:56:50 crc kubenswrapper[4881]: I1211 08:56:50.005679 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2"
Dec 11 08:56:50 crc kubenswrapper[4881]: E1211 08:56:50.006551 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:57:05 crc kubenswrapper[4881]: I1211 08:57:05.006430 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2"
Dec 11 08:57:05 crc kubenswrapper[4881]: E1211 08:57:05.007380 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:57:11 crc kubenswrapper[4881]: I1211 08:57:11.575477 4881 generic.go:334] "Generic (PLEG): container finished" podID="9b70ae00-542c-47d9-b985-5fc2433218a5" containerID="eac344494217a8589ef02a6deb10b61fbcc50a3aa9814e11a69342e5a018c310" exitCode=0
Dec 11 08:57:11 crc kubenswrapper[4881]: I1211 08:57:11.575576 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms" event={"ID":"9b70ae00-542c-47d9-b985-5fc2433218a5","Type":"ContainerDied","Data":"eac344494217a8589ef02a6deb10b61fbcc50a3aa9814e11a69342e5a018c310"}
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.044233 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.182904 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-inventory\") pod \"9b70ae00-542c-47d9-b985-5fc2433218a5\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") "
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.183055 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-ssh-key\") pod \"9b70ae00-542c-47d9-b985-5fc2433218a5\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") "
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.183185 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-ovn-combined-ca-bundle\") pod \"9b70ae00-542c-47d9-b985-5fc2433218a5\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") "
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.183308 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-542wk\" (UniqueName: \"kubernetes.io/projected/9b70ae00-542c-47d9-b985-5fc2433218a5-kube-api-access-542wk\") pod \"9b70ae00-542c-47d9-b985-5fc2433218a5\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") "
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.183460 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/9b70ae00-542c-47d9-b985-5fc2433218a5-ovncontroller-config-0\") pod \"9b70ae00-542c-47d9-b985-5fc2433218a5\" (UID: \"9b70ae00-542c-47d9-b985-5fc2433218a5\") "
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.189131 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b70ae00-542c-47d9-b985-5fc2433218a5-kube-api-access-542wk" (OuterVolumeSpecName: "kube-api-access-542wk") pod "9b70ae00-542c-47d9-b985-5fc2433218a5" (UID: "9b70ae00-542c-47d9-b985-5fc2433218a5"). InnerVolumeSpecName "kube-api-access-542wk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.189575 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "9b70ae00-542c-47d9-b985-5fc2433218a5" (UID: "9b70ae00-542c-47d9-b985-5fc2433218a5"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.213306 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b70ae00-542c-47d9-b985-5fc2433218a5-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "9b70ae00-542c-47d9-b985-5fc2433218a5" (UID: "9b70ae00-542c-47d9-b985-5fc2433218a5"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.220380 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-inventory" (OuterVolumeSpecName: "inventory") pod "9b70ae00-542c-47d9-b985-5fc2433218a5" (UID: "9b70ae00-542c-47d9-b985-5fc2433218a5"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.225714 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9b70ae00-542c-47d9-b985-5fc2433218a5" (UID: "9b70ae00-542c-47d9-b985-5fc2433218a5"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.287615 4881 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/9b70ae00-542c-47d9-b985-5fc2433218a5-ovncontroller-config-0\") on node \"crc\" DevicePath \"\""
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.287653 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-inventory\") on node \"crc\" DevicePath \"\""
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.287662 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.287672 4881 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b70ae00-542c-47d9-b985-5fc2433218a5-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.287683 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-542wk\" (UniqueName: \"kubernetes.io/projected/9b70ae00-542c-47d9-b985-5fc2433218a5-kube-api-access-542wk\") on node \"crc\" DevicePath \"\""
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.598138 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms" event={"ID":"9b70ae00-542c-47d9-b985-5fc2433218a5","Type":"ContainerDied","Data":"68ad1fd717024bbba417933b5e84581a82f574ae04aab6329f1894eed7ba8b2c"}
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.598177 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68ad1fd717024bbba417933b5e84581a82f574ae04aab6329f1894eed7ba8b2c"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.598176 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-tvdms"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.706553 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"]
Dec 11 08:57:13 crc kubenswrapper[4881]: E1211 08:57:13.707217 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b70ae00-542c-47d9-b985-5fc2433218a5" containerName="ovn-edpm-deployment-openstack-edpm-ipam"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.707239 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b70ae00-542c-47d9-b985-5fc2433218a5" containerName="ovn-edpm-deployment-openstack-edpm-ipam"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.707544 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b70ae00-542c-47d9-b985-5fc2433218a5" containerName="ovn-edpm-deployment-openstack-edpm-ipam"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.708675 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.712230 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.712438 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.712549 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.712723 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.717860 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.718090 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.725154 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"]
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.900969 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.901309 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.901433 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8d4dz\" (UniqueName: \"kubernetes.io/projected/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-kube-api-access-8d4dz\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.901591 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.901713 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:13 crc kubenswrapper[4881]: I1211 08:57:13.901755 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:14 crc kubenswrapper[4881]: I1211 08:57:14.003457 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:14 crc kubenswrapper[4881]: I1211 08:57:14.003555 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:14 crc kubenswrapper[4881]: I1211 08:57:14.003589 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:14 crc kubenswrapper[4881]: I1211 08:57:14.003638 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:14 crc kubenswrapper[4881]: I1211 08:57:14.004204 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:14 crc kubenswrapper[4881]: I1211 08:57:14.004271 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8d4dz\" (UniqueName: \"kubernetes.io/projected/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-kube-api-access-8d4dz\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:14 crc kubenswrapper[4881]: I1211 08:57:14.007018 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:14 crc kubenswrapper[4881]: I1211 08:57:14.008908 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:14 crc kubenswrapper[4881]: I1211 08:57:14.009256 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:14 crc kubenswrapper[4881]: I1211 08:57:14.009300 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:14 crc kubenswrapper[4881]: I1211 08:57:14.015980 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:14 crc kubenswrapper[4881]: I1211 08:57:14.022072 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8d4dz\" (UniqueName: \"kubernetes.io/projected/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-kube-api-access-8d4dz\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:14 crc kubenswrapper[4881]: I1211 08:57:14.043358 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"
Dec 11 08:57:14 crc kubenswrapper[4881]: I1211 08:57:14.636463 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4"]
Dec 11 08:57:15 crc kubenswrapper[4881]: I1211 08:57:15.621187 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4" event={"ID":"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4","Type":"ContainerStarted","Data":"4b84fa308c07fa233ba8fba77071988c9c82d223814ab4e994750418590eea4f"}
Dec 11 08:57:15 crc kubenswrapper[4881]: I1211 08:57:15.621748 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4" event={"ID":"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4","Type":"ContainerStarted","Data":"5bcf7efec7884ee40d184a8ae1f87e3a6b2ae14e444c2117da06bc8955054d26"}
Dec 11 08:57:15 crc kubenswrapper[4881]: I1211 08:57:15.643775 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4" podStartSLOduration=2.017817413 podStartE2EDuration="2.643752974s" podCreationTimestamp="2025-12-11 08:57:13 +0000 UTC" firstStartedPulling="2025-12-11 08:57:14.648007447 +0000 UTC m=+2483.025376144" lastFinishedPulling="2025-12-11 08:57:15.273943008 +0000 UTC m=+2483.651311705" observedRunningTime="2025-12-11 08:57:15.641435428 +0000 UTC m=+2484.018804125" watchObservedRunningTime="2025-12-11 08:57:15.643752974 +0000 UTC m=+2484.021121671"
Dec 11 08:57:18 crc kubenswrapper[4881]: I1211 08:57:18.005654 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2"
Dec 11 08:57:18 crc kubenswrapper[4881]: E1211 08:57:18.007082 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:57:29 crc kubenswrapper[4881]: I1211 08:57:29.005674 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2"
Dec 11 08:57:29 crc kubenswrapper[4881]: E1211 08:57:29.006538 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:57:44 crc kubenswrapper[4881]: I1211 08:57:44.007647 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2"
Dec 11 08:57:44 crc kubenswrapper[4881]: E1211 08:57:44.008552 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:57:55 crc kubenswrapper[4881]: I1211 08:57:55.005327 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2"
Dec 11 08:57:55 crc kubenswrapper[4881]: E1211 08:57:55.006138 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 08:58:04 crc kubenswrapper[4881]: I1211 08:58:04.164215 4881 generic.go:334] "Generic (PLEG): container finished" podID="0a91cc42-4d2c-4527-81b9-7bfe0432f4f4" containerID="4b84fa308c07fa233ba8fba77071988c9c82d223814ab4e994750418590eea4f" exitCode=0
Dec 11 08:58:04 crc kubenswrapper[4881]: I1211 08:58:04.164317 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4" event={"ID":"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4","Type":"ContainerDied","Data":"4b84fa308c07fa233ba8fba77071988c9c82d223814ab4e994750418590eea4f"}
Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.612509 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4" Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.744850 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-neutron-ovn-metadata-agent-neutron-config-0\") pod \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.744966 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-inventory\") pod \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.745010 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-neutron-metadata-combined-ca-bundle\") pod \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.745153 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-nova-metadata-neutron-config-0\") pod \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.745173 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-ssh-key\") pod \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.745312 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8d4dz\" (UniqueName: \"kubernetes.io/projected/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-kube-api-access-8d4dz\") pod \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\" (UID: \"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4\") " Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.760494 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "0a91cc42-4d2c-4527-81b9-7bfe0432f4f4" (UID: "0a91cc42-4d2c-4527-81b9-7bfe0432f4f4"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.760653 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-kube-api-access-8d4dz" (OuterVolumeSpecName: "kube-api-access-8d4dz") pod "0a91cc42-4d2c-4527-81b9-7bfe0432f4f4" (UID: "0a91cc42-4d2c-4527-81b9-7bfe0432f4f4"). InnerVolumeSpecName "kube-api-access-8d4dz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.776747 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "0a91cc42-4d2c-4527-81b9-7bfe0432f4f4" (UID: "0a91cc42-4d2c-4527-81b9-7bfe0432f4f4"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.776922 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0a91cc42-4d2c-4527-81b9-7bfe0432f4f4" (UID: "0a91cc42-4d2c-4527-81b9-7bfe0432f4f4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.779793 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "0a91cc42-4d2c-4527-81b9-7bfe0432f4f4" (UID: "0a91cc42-4d2c-4527-81b9-7bfe0432f4f4"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.784069 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-inventory" (OuterVolumeSpecName: "inventory") pod "0a91cc42-4d2c-4527-81b9-7bfe0432f4f4" (UID: "0a91cc42-4d2c-4527-81b9-7bfe0432f4f4"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.848541 4881 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.848753 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.848812 4881 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.848868 4881 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.848926 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 08:58:05 crc kubenswrapper[4881]: I1211 08:58:05.848981 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8d4dz\" (UniqueName: \"kubernetes.io/projected/0a91cc42-4d2c-4527-81b9-7bfe0432f4f4-kube-api-access-8d4dz\") on node \"crc\" DevicePath \"\"" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.189171 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4" event={"ID":"0a91cc42-4d2c-4527-81b9-7bfe0432f4f4","Type":"ContainerDied","Data":"5bcf7efec7884ee40d184a8ae1f87e3a6b2ae14e444c2117da06bc8955054d26"} Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.189512 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5bcf7efec7884ee40d184a8ae1f87e3a6b2ae14e444c2117da06bc8955054d26" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.189569 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.357401 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc"] Dec 11 08:58:06 crc kubenswrapper[4881]: E1211 08:58:06.357963 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a91cc42-4d2c-4527-81b9-7bfe0432f4f4" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.357981 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a91cc42-4d2c-4527-81b9-7bfe0432f4f4" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.358255 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a91cc42-4d2c-4527-81b9-7bfe0432f4f4" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.359235 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.364809 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.365783 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.365789 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.366184 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.366320 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.391198 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc"] Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.466276 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.466373 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.466771 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gml4c\" (UniqueName: \"kubernetes.io/projected/1ad81113-10d1-4110-81ad-abd39146b84c-kube-api-access-gml4c\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.466931 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.467066 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.569438 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ssh-key\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.569520 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.569595 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.569638 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.569741 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gml4c\" (UniqueName: \"kubernetes.io/projected/1ad81113-10d1-4110-81ad-abd39146b84c-kube-api-access-gml4c\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.573534 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.573872 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.573881 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.575132 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-libvirt-combined-ca-bundle\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.586192 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gml4c\" (UniqueName: \"kubernetes.io/projected/1ad81113-10d1-4110-81ad-abd39146b84c-kube-api-access-gml4c\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 08:58:06 crc kubenswrapper[4881]: I1211 08:58:06.741892 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 08:58:07 crc kubenswrapper[4881]: I1211 08:58:07.324110 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc"] Dec 11 08:58:08 crc kubenswrapper[4881]: I1211 08:58:08.211833 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" event={"ID":"1ad81113-10d1-4110-81ad-abd39146b84c","Type":"ContainerStarted","Data":"839a09b53d580a3a22e167c904d4d6d50bf1d28f7be53fc21dbf525af82fea0a"} Dec 11 08:58:09 crc kubenswrapper[4881]: I1211 08:58:09.223648 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" event={"ID":"1ad81113-10d1-4110-81ad-abd39146b84c","Type":"ContainerStarted","Data":"189884015e0984650d0da1ddc1b86df1c1e1dbaefdf021ff5c5afcc3d9a5649d"} Dec 11 08:58:09 crc kubenswrapper[4881]: I1211 08:58:09.238609 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" podStartSLOduration=2.197467912 podStartE2EDuration="3.238589089s" podCreationTimestamp="2025-12-11 08:58:06 +0000 UTC" firstStartedPulling="2025-12-11 08:58:07.330985156 +0000 UTC m=+2535.708353853" lastFinishedPulling="2025-12-11 08:58:08.372106323 +0000 UTC m=+2536.749475030" observedRunningTime="2025-12-11 08:58:09.237224775 +0000 UTC m=+2537.614593502" watchObservedRunningTime="2025-12-11 08:58:09.238589089 +0000 UTC m=+2537.615957786" Dec 11 08:58:10 crc kubenswrapper[4881]: I1211 08:58:10.006179 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2" Dec 11 08:58:10 crc kubenswrapper[4881]: E1211 08:58:10.006579 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:58:23 crc kubenswrapper[4881]: I1211 08:58:23.015080 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2" Dec 11 08:58:23 crc kubenswrapper[4881]: E1211 08:58:23.016101 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:58:36 crc kubenswrapper[4881]: I1211 08:58:36.008055 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2" Dec 11 08:58:36 crc kubenswrapper[4881]: E1211 08:58:36.010164 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:58:47 crc kubenswrapper[4881]: I1211 08:58:47.006182 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2" Dec 11 08:58:47 crc kubenswrapper[4881]: E1211 08:58:47.007136 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:59:00 crc kubenswrapper[4881]: I1211 08:59:00.006189 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2" Dec 11 08:59:00 crc kubenswrapper[4881]: E1211 08:59:00.007015 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:59:04 crc kubenswrapper[4881]: I1211 08:59:04.642692 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-f68ds"] Dec 11 08:59:04 crc kubenswrapper[4881]: I1211 08:59:04.650864 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-f68ds" Dec 11 08:59:04 crc kubenswrapper[4881]: I1211 08:59:04.681230 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f68ds"] Dec 11 08:59:04 crc kubenswrapper[4881]: I1211 08:59:04.732915 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srswc\" (UniqueName: \"kubernetes.io/projected/35509e03-022a-40f0-9268-037ba4ed4848-kube-api-access-srswc\") pod \"community-operators-f68ds\" (UID: \"35509e03-022a-40f0-9268-037ba4ed4848\") " pod="openshift-marketplace/community-operators-f68ds" Dec 11 08:59:04 crc kubenswrapper[4881]: I1211 08:59:04.733180 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35509e03-022a-40f0-9268-037ba4ed4848-catalog-content\") pod \"community-operators-f68ds\" (UID: \"35509e03-022a-40f0-9268-037ba4ed4848\") " pod="openshift-marketplace/community-operators-f68ds" Dec 11 08:59:04 crc kubenswrapper[4881]: I1211 08:59:04.733366 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35509e03-022a-40f0-9268-037ba4ed4848-utilities\") pod \"community-operators-f68ds\" (UID: \"35509e03-022a-40f0-9268-037ba4ed4848\") " pod="openshift-marketplace/community-operators-f68ds" Dec 11 08:59:04 crc kubenswrapper[4881]: I1211 08:59:04.835609 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35509e03-022a-40f0-9268-037ba4ed4848-catalog-content\") pod \"community-operators-f68ds\" (UID: \"35509e03-022a-40f0-9268-037ba4ed4848\") " pod="openshift-marketplace/community-operators-f68ds" Dec 11 08:59:04 crc kubenswrapper[4881]: I1211 08:59:04.835711 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35509e03-022a-40f0-9268-037ba4ed4848-utilities\") pod \"community-operators-f68ds\" (UID: \"35509e03-022a-40f0-9268-037ba4ed4848\") " pod="openshift-marketplace/community-operators-f68ds" Dec 11 08:59:04 crc kubenswrapper[4881]: I1211 08:59:04.835901 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srswc\" (UniqueName: \"kubernetes.io/projected/35509e03-022a-40f0-9268-037ba4ed4848-kube-api-access-srswc\") pod \"community-operators-f68ds\" (UID: \"35509e03-022a-40f0-9268-037ba4ed4848\") " pod="openshift-marketplace/community-operators-f68ds" Dec 11 08:59:04 crc kubenswrapper[4881]: I1211 08:59:04.836724 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35509e03-022a-40f0-9268-037ba4ed4848-catalog-content\") pod \"community-operators-f68ds\" (UID: \"35509e03-022a-40f0-9268-037ba4ed4848\") " pod="openshift-marketplace/community-operators-f68ds" Dec 11 08:59:04 crc kubenswrapper[4881]: I1211 08:59:04.836767 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35509e03-022a-40f0-9268-037ba4ed4848-utilities\") pod \"community-operators-f68ds\" (UID: \"35509e03-022a-40f0-9268-037ba4ed4848\") " pod="openshift-marketplace/community-operators-f68ds" Dec 11 08:59:04 crc kubenswrapper[4881]: I1211 08:59:04.856281 4881 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-srswc\" (UniqueName: \"kubernetes.io/projected/35509e03-022a-40f0-9268-037ba4ed4848-kube-api-access-srswc\") pod \"community-operators-f68ds\" (UID: \"35509e03-022a-40f0-9268-037ba4ed4848\") " pod="openshift-marketplace/community-operators-f68ds" Dec 11 08:59:04 crc kubenswrapper[4881]: I1211 08:59:04.975885 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f68ds" Dec 11 08:59:05 crc kubenswrapper[4881]: I1211 08:59:05.490463 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f68ds"] Dec 11 08:59:05 crc kubenswrapper[4881]: I1211 08:59:05.879274 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f68ds" event={"ID":"35509e03-022a-40f0-9268-037ba4ed4848","Type":"ContainerStarted","Data":"fae2d24cddf8d1d35d71b4021897e83b8923ea111e1291adc216f1d2c139496b"} Dec 11 08:59:06 crc kubenswrapper[4881]: I1211 08:59:06.890204 4881 generic.go:334] "Generic (PLEG): container finished" podID="35509e03-022a-40f0-9268-037ba4ed4848" containerID="da9f885a2c287a5f47f076eb596949ddbb7c6254cefd3cf152e0f9695739f036" exitCode=0 Dec 11 08:59:06 crc kubenswrapper[4881]: I1211 08:59:06.890294 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f68ds" event={"ID":"35509e03-022a-40f0-9268-037ba4ed4848","Type":"ContainerDied","Data":"da9f885a2c287a5f47f076eb596949ddbb7c6254cefd3cf152e0f9695739f036"} Dec 11 08:59:06 crc kubenswrapper[4881]: I1211 08:59:06.892618 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 08:59:08 crc kubenswrapper[4881]: I1211 08:59:08.911741 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f68ds" event={"ID":"35509e03-022a-40f0-9268-037ba4ed4848","Type":"ContainerStarted","Data":"fe70cf65eafe0d763af0376ce12991d7b80ac96f64fa8c2c1b5d189ae46fc138"} Dec 11 08:59:13 crc kubenswrapper[4881]: I1211 08:59:13.967673 4881 generic.go:334] "Generic (PLEG): container finished" podID="35509e03-022a-40f0-9268-037ba4ed4848" containerID="fe70cf65eafe0d763af0376ce12991d7b80ac96f64fa8c2c1b5d189ae46fc138" exitCode=0 Dec 11 08:59:13 crc kubenswrapper[4881]: I1211 08:59:13.967746 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f68ds" event={"ID":"35509e03-022a-40f0-9268-037ba4ed4848","Type":"ContainerDied","Data":"fe70cf65eafe0d763af0376ce12991d7b80ac96f64fa8c2c1b5d189ae46fc138"} Dec 11 08:59:15 crc kubenswrapper[4881]: I1211 08:59:15.005506 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2" Dec 11 08:59:15 crc kubenswrapper[4881]: E1211 08:59:15.006273 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:59:20 crc kubenswrapper[4881]: E1211 08:59:20.182281 4881 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.177s" Dec 11 08:59:30 crc kubenswrapper[4881]: I1211 
08:59:30.005549 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2" Dec 11 08:59:30 crc kubenswrapper[4881]: E1211 08:59:30.006672 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:59:30 crc kubenswrapper[4881]: I1211 08:59:30.146574 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f68ds" event={"ID":"35509e03-022a-40f0-9268-037ba4ed4848","Type":"ContainerStarted","Data":"78a668d7e8ff891cee651b43f25a80324f7193b1882555d470868eebd4805055"} Dec 11 08:59:30 crc kubenswrapper[4881]: I1211 08:59:30.179209 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-f68ds" podStartSLOduration=3.712062838 podStartE2EDuration="26.179174224s" podCreationTimestamp="2025-12-11 08:59:04 +0000 UTC" firstStartedPulling="2025-12-11 08:59:06.89236584 +0000 UTC m=+2595.269734537" lastFinishedPulling="2025-12-11 08:59:29.359477226 +0000 UTC m=+2617.736845923" observedRunningTime="2025-12-11 08:59:30.162486847 +0000 UTC m=+2618.539855544" watchObservedRunningTime="2025-12-11 08:59:30.179174224 +0000 UTC m=+2618.556542921" Dec 11 08:59:34 crc kubenswrapper[4881]: I1211 08:59:34.976395 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-f68ds" Dec 11 08:59:34 crc kubenswrapper[4881]: I1211 08:59:34.976694 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-f68ds" Dec 11 08:59:35 crc kubenswrapper[4881]: I1211 08:59:35.030706 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-f68ds" Dec 11 08:59:35 crc kubenswrapper[4881]: I1211 08:59:35.251728 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-f68ds" Dec 11 08:59:35 crc kubenswrapper[4881]: I1211 08:59:35.843010 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-f68ds"] Dec 11 08:59:37 crc kubenswrapper[4881]: I1211 08:59:37.221397 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-f68ds" podUID="35509e03-022a-40f0-9268-037ba4ed4848" containerName="registry-server" containerID="cri-o://78a668d7e8ff891cee651b43f25a80324f7193b1882555d470868eebd4805055" gracePeriod=2 Dec 11 08:59:38 crc kubenswrapper[4881]: I1211 08:59:38.241545 4881 generic.go:334] "Generic (PLEG): container finished" podID="35509e03-022a-40f0-9268-037ba4ed4848" containerID="78a668d7e8ff891cee651b43f25a80324f7193b1882555d470868eebd4805055" exitCode=0 Dec 11 08:59:38 crc kubenswrapper[4881]: I1211 08:59:38.241607 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f68ds" event={"ID":"35509e03-022a-40f0-9268-037ba4ed4848","Type":"ContainerDied","Data":"78a668d7e8ff891cee651b43f25a80324f7193b1882555d470868eebd4805055"} Dec 11 08:59:38 crc kubenswrapper[4881]: I1211 08:59:38.241865 4881 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f68ds" event={"ID":"35509e03-022a-40f0-9268-037ba4ed4848","Type":"ContainerDied","Data":"fae2d24cddf8d1d35d71b4021897e83b8923ea111e1291adc216f1d2c139496b"} Dec 11 08:59:38 crc kubenswrapper[4881]: I1211 08:59:38.241880 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fae2d24cddf8d1d35d71b4021897e83b8923ea111e1291adc216f1d2c139496b" Dec 11 08:59:38 crc kubenswrapper[4881]: I1211 08:59:38.287775 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f68ds" Dec 11 08:59:38 crc kubenswrapper[4881]: I1211 08:59:38.392174 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srswc\" (UniqueName: \"kubernetes.io/projected/35509e03-022a-40f0-9268-037ba4ed4848-kube-api-access-srswc\") pod \"35509e03-022a-40f0-9268-037ba4ed4848\" (UID: \"35509e03-022a-40f0-9268-037ba4ed4848\") " Dec 11 08:59:38 crc kubenswrapper[4881]: I1211 08:59:38.392354 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35509e03-022a-40f0-9268-037ba4ed4848-catalog-content\") pod \"35509e03-022a-40f0-9268-037ba4ed4848\" (UID: \"35509e03-022a-40f0-9268-037ba4ed4848\") " Dec 11 08:59:38 crc kubenswrapper[4881]: I1211 08:59:38.392519 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35509e03-022a-40f0-9268-037ba4ed4848-utilities\") pod \"35509e03-022a-40f0-9268-037ba4ed4848\" (UID: \"35509e03-022a-40f0-9268-037ba4ed4848\") " Dec 11 08:59:38 crc kubenswrapper[4881]: I1211 08:59:38.393232 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35509e03-022a-40f0-9268-037ba4ed4848-utilities" (OuterVolumeSpecName: "utilities") pod "35509e03-022a-40f0-9268-037ba4ed4848" (UID: "35509e03-022a-40f0-9268-037ba4ed4848"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:59:38 crc kubenswrapper[4881]: I1211 08:59:38.393672 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35509e03-022a-40f0-9268-037ba4ed4848-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 08:59:38 crc kubenswrapper[4881]: I1211 08:59:38.398544 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35509e03-022a-40f0-9268-037ba4ed4848-kube-api-access-srswc" (OuterVolumeSpecName: "kube-api-access-srswc") pod "35509e03-022a-40f0-9268-037ba4ed4848" (UID: "35509e03-022a-40f0-9268-037ba4ed4848"). InnerVolumeSpecName "kube-api-access-srswc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 08:59:38 crc kubenswrapper[4881]: I1211 08:59:38.441861 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35509e03-022a-40f0-9268-037ba4ed4848-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "35509e03-022a-40f0-9268-037ba4ed4848" (UID: "35509e03-022a-40f0-9268-037ba4ed4848"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 08:59:38 crc kubenswrapper[4881]: I1211 08:59:38.496172 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35509e03-022a-40f0-9268-037ba4ed4848-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 08:59:38 crc kubenswrapper[4881]: I1211 08:59:38.496211 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srswc\" (UniqueName: \"kubernetes.io/projected/35509e03-022a-40f0-9268-037ba4ed4848-kube-api-access-srswc\") on node \"crc\" DevicePath \"\"" Dec 11 08:59:39 crc kubenswrapper[4881]: I1211 08:59:39.253151 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f68ds" Dec 11 08:59:39 crc kubenswrapper[4881]: I1211 08:59:39.287413 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-f68ds"] Dec 11 08:59:39 crc kubenswrapper[4881]: I1211 08:59:39.315284 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-f68ds"] Dec 11 08:59:41 crc kubenswrapper[4881]: I1211 08:59:41.006143 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2" Dec 11 08:59:41 crc kubenswrapper[4881]: E1211 08:59:41.007057 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 08:59:41 crc kubenswrapper[4881]: I1211 08:59:41.022512 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35509e03-022a-40f0-9268-037ba4ed4848" path="/var/lib/kubelet/pods/35509e03-022a-40f0-9268-037ba4ed4848/volumes" Dec 11 08:59:54 crc kubenswrapper[4881]: I1211 08:59:54.007073 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2" Dec 11 08:59:54 crc kubenswrapper[4881]: E1211 08:59:54.008635 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.181570 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj"] Dec 11 09:00:00 crc kubenswrapper[4881]: E1211 09:00:00.182866 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35509e03-022a-40f0-9268-037ba4ed4848" containerName="extract-utilities" Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.182889 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="35509e03-022a-40f0-9268-037ba4ed4848" containerName="extract-utilities" Dec 11 09:00:00 crc kubenswrapper[4881]: E1211 09:00:00.182917 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35509e03-022a-40f0-9268-037ba4ed4848" containerName="registry-server" Dec 11 
09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.182925 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="35509e03-022a-40f0-9268-037ba4ed4848" containerName="registry-server" Dec 11 09:00:00 crc kubenswrapper[4881]: E1211 09:00:00.182944 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35509e03-022a-40f0-9268-037ba4ed4848" containerName="extract-content" Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.182952 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="35509e03-022a-40f0-9268-037ba4ed4848" containerName="extract-content" Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.183284 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="35509e03-022a-40f0-9268-037ba4ed4848" containerName="registry-server" Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.184467 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj" Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.194515 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.194523 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.197279 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj"] Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.252740 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9eba9e53-1f89-432d-8099-28ec7d7ce331-config-volume\") pod \"collect-profiles-29424060-lzkkj\" (UID: \"9eba9e53-1f89-432d-8099-28ec7d7ce331\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj" Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.253134 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27q5w\" (UniqueName: \"kubernetes.io/projected/9eba9e53-1f89-432d-8099-28ec7d7ce331-kube-api-access-27q5w\") pod \"collect-profiles-29424060-lzkkj\" (UID: \"9eba9e53-1f89-432d-8099-28ec7d7ce331\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj" Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.253299 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9eba9e53-1f89-432d-8099-28ec7d7ce331-secret-volume\") pod \"collect-profiles-29424060-lzkkj\" (UID: \"9eba9e53-1f89-432d-8099-28ec7d7ce331\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj" Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.354915 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9eba9e53-1f89-432d-8099-28ec7d7ce331-config-volume\") pod \"collect-profiles-29424060-lzkkj\" (UID: \"9eba9e53-1f89-432d-8099-28ec7d7ce331\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj" Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.355038 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27q5w\" (UniqueName: 
\"kubernetes.io/projected/9eba9e53-1f89-432d-8099-28ec7d7ce331-kube-api-access-27q5w\") pod \"collect-profiles-29424060-lzkkj\" (UID: \"9eba9e53-1f89-432d-8099-28ec7d7ce331\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj" Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.355157 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9eba9e53-1f89-432d-8099-28ec7d7ce331-secret-volume\") pod \"collect-profiles-29424060-lzkkj\" (UID: \"9eba9e53-1f89-432d-8099-28ec7d7ce331\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj" Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.355923 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9eba9e53-1f89-432d-8099-28ec7d7ce331-config-volume\") pod \"collect-profiles-29424060-lzkkj\" (UID: \"9eba9e53-1f89-432d-8099-28ec7d7ce331\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj" Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.362298 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9eba9e53-1f89-432d-8099-28ec7d7ce331-secret-volume\") pod \"collect-profiles-29424060-lzkkj\" (UID: \"9eba9e53-1f89-432d-8099-28ec7d7ce331\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj" Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.377255 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27q5w\" (UniqueName: \"kubernetes.io/projected/9eba9e53-1f89-432d-8099-28ec7d7ce331-kube-api-access-27q5w\") pod \"collect-profiles-29424060-lzkkj\" (UID: \"9eba9e53-1f89-432d-8099-28ec7d7ce331\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj" Dec 11 09:00:00 crc kubenswrapper[4881]: I1211 09:00:00.516481 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj" Dec 11 09:00:01 crc kubenswrapper[4881]: I1211 09:00:01.000845 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj"] Dec 11 09:00:01 crc kubenswrapper[4881]: I1211 09:00:01.496457 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj" event={"ID":"9eba9e53-1f89-432d-8099-28ec7d7ce331","Type":"ContainerStarted","Data":"9688ead5aa86528b8dc1a04c530da212bcd09429c2f4eaec759ef0bcbe3f78e7"} Dec 11 09:00:01 crc kubenswrapper[4881]: I1211 09:00:01.496845 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj" event={"ID":"9eba9e53-1f89-432d-8099-28ec7d7ce331","Type":"ContainerStarted","Data":"b403cd2870ab298db19db141249c4eedb124244aa3956a2c446955d68a64ffe7"} Dec 11 09:00:02 crc kubenswrapper[4881]: I1211 09:00:02.508190 4881 generic.go:334] "Generic (PLEG): container finished" podID="9eba9e53-1f89-432d-8099-28ec7d7ce331" containerID="9688ead5aa86528b8dc1a04c530da212bcd09429c2f4eaec759ef0bcbe3f78e7" exitCode=0 Dec 11 09:00:02 crc kubenswrapper[4881]: I1211 09:00:02.508299 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj" event={"ID":"9eba9e53-1f89-432d-8099-28ec7d7ce331","Type":"ContainerDied","Data":"9688ead5aa86528b8dc1a04c530da212bcd09429c2f4eaec759ef0bcbe3f78e7"} Dec 11 09:00:03 crc kubenswrapper[4881]: I1211 09:00:03.894896 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj" Dec 11 09:00:04 crc kubenswrapper[4881]: I1211 09:00:04.047386 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9eba9e53-1f89-432d-8099-28ec7d7ce331-config-volume\") pod \"9eba9e53-1f89-432d-8099-28ec7d7ce331\" (UID: \"9eba9e53-1f89-432d-8099-28ec7d7ce331\") " Dec 11 09:00:04 crc kubenswrapper[4881]: I1211 09:00:04.047470 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9eba9e53-1f89-432d-8099-28ec7d7ce331-secret-volume\") pod \"9eba9e53-1f89-432d-8099-28ec7d7ce331\" (UID: \"9eba9e53-1f89-432d-8099-28ec7d7ce331\") " Dec 11 09:00:04 crc kubenswrapper[4881]: I1211 09:00:04.048283 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9eba9e53-1f89-432d-8099-28ec7d7ce331-config-volume" (OuterVolumeSpecName: "config-volume") pod "9eba9e53-1f89-432d-8099-28ec7d7ce331" (UID: "9eba9e53-1f89-432d-8099-28ec7d7ce331"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 09:00:04 crc kubenswrapper[4881]: I1211 09:00:04.050285 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-27q5w\" (UniqueName: \"kubernetes.io/projected/9eba9e53-1f89-432d-8099-28ec7d7ce331-kube-api-access-27q5w\") pod \"9eba9e53-1f89-432d-8099-28ec7d7ce331\" (UID: \"9eba9e53-1f89-432d-8099-28ec7d7ce331\") " Dec 11 09:00:04 crc kubenswrapper[4881]: I1211 09:00:04.051962 4881 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9eba9e53-1f89-432d-8099-28ec7d7ce331-config-volume\") on node \"crc\" DevicePath \"\"" Dec 11 09:00:04 crc kubenswrapper[4881]: I1211 09:00:04.061530 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9eba9e53-1f89-432d-8099-28ec7d7ce331-kube-api-access-27q5w" (OuterVolumeSpecName: "kube-api-access-27q5w") pod "9eba9e53-1f89-432d-8099-28ec7d7ce331" (UID: "9eba9e53-1f89-432d-8099-28ec7d7ce331"). InnerVolumeSpecName "kube-api-access-27q5w". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:00:04 crc kubenswrapper[4881]: I1211 09:00:04.064198 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9eba9e53-1f89-432d-8099-28ec7d7ce331-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "9eba9e53-1f89-432d-8099-28ec7d7ce331" (UID: "9eba9e53-1f89-432d-8099-28ec7d7ce331"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:00:04 crc kubenswrapper[4881]: I1211 09:00:04.155611 4881 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9eba9e53-1f89-432d-8099-28ec7d7ce331-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 11 09:00:04 crc kubenswrapper[4881]: I1211 09:00:04.155652 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-27q5w\" (UniqueName: \"kubernetes.io/projected/9eba9e53-1f89-432d-8099-28ec7d7ce331-kube-api-access-27q5w\") on node \"crc\" DevicePath \"\"" Dec 11 09:00:04 crc kubenswrapper[4881]: I1211 09:00:04.528616 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj" event={"ID":"9eba9e53-1f89-432d-8099-28ec7d7ce331","Type":"ContainerDied","Data":"b403cd2870ab298db19db141249c4eedb124244aa3956a2c446955d68a64ffe7"} Dec 11 09:00:04 crc kubenswrapper[4881]: I1211 09:00:04.528661 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b403cd2870ab298db19db141249c4eedb124244aa3956a2c446955d68a64ffe7" Dec 11 09:00:04 crc kubenswrapper[4881]: I1211 09:00:04.528713 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj" Dec 11 09:00:04 crc kubenswrapper[4881]: I1211 09:00:04.741396 4881 patch_prober.go:28] interesting pod/monitoring-plugin-5cb9d5856f-c4cvw container/monitoring-plugin namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.79:9443/health\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 11 09:00:04 crc kubenswrapper[4881]: I1211 09:00:04.741781 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/monitoring-plugin-5cb9d5856f-c4cvw" podUID="1904b37a-2b5b-425b-b3cb-cad8c86efcff" containerName="monitoring-plugin" probeResult="failure" output="Get \"https://10.217.0.79:9443/health\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 09:00:04 crc kubenswrapper[4881]: I1211 09:00:04.984066 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf"] Dec 11 09:00:04 crc kubenswrapper[4881]: I1211 09:00:04.995031 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424015-bl9jf"] Dec 11 09:00:05 crc kubenswrapper[4881]: I1211 09:00:05.938670 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77d994ef-53f1-4ef8-a668-38226c6c460b" path="/var/lib/kubelet/pods/77d994ef-53f1-4ef8-a668-38226c6c460b/volumes" Dec 11 09:00:08 crc kubenswrapper[4881]: I1211 09:00:08.006328 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2" Dec 11 09:00:08 crc kubenswrapper[4881]: E1211 09:00:08.007202 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:00:10 crc kubenswrapper[4881]: I1211 09:00:10.137377 4881 scope.go:117] "RemoveContainer" containerID="50e31bab43851eab903aacf067313fcad8a0086c585973583eb3d17ed380b709" Dec 11 09:00:10 crc kubenswrapper[4881]: I1211 09:00:10.167207 4881 scope.go:117] "RemoveContainer" containerID="ba80b510d8e6805aa78f0205e77f978a9e7c40cf45d67e093babcdd607de2a8b" Dec 11 09:00:10 crc kubenswrapper[4881]: I1211 09:00:10.222762 4881 scope.go:117] "RemoveContainer" containerID="309bbeac67bb53aa0969a3e3d31bddc1e3ca5d095eb1d312a01e5c0ff16c920c" Dec 11 09:00:10 crc kubenswrapper[4881]: I1211 09:00:10.661527 4881 scope.go:117] "RemoveContainer" containerID="75437796ec84f7d3c720a61fd117211283087a2611ae662aefc9c291a59de970" Dec 11 09:00:20 crc kubenswrapper[4881]: I1211 09:00:20.006975 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2" Dec 11 09:00:20 crc kubenswrapper[4881]: E1211 09:00:20.009263 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" 
podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:00:34 crc kubenswrapper[4881]: I1211 09:00:34.006182 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2" Dec 11 09:00:34 crc kubenswrapper[4881]: I1211 09:00:34.281064 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"e510f860249c2369bd34a09d0c999610be3cd247e6b9eafae5e3404be20264de"} Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.172046 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29424061-884r9"] Dec 11 09:01:00 crc kubenswrapper[4881]: E1211 09:01:00.173285 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9eba9e53-1f89-432d-8099-28ec7d7ce331" containerName="collect-profiles" Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.173303 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="9eba9e53-1f89-432d-8099-28ec7d7ce331" containerName="collect-profiles" Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.173650 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="9eba9e53-1f89-432d-8099-28ec7d7ce331" containerName="collect-profiles" Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.174692 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29424061-884r9" Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.191398 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29424061-884r9"] Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.262572 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-fernet-keys\") pod \"keystone-cron-29424061-884r9\" (UID: \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\") " pod="openstack/keystone-cron-29424061-884r9" Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.262990 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-combined-ca-bundle\") pod \"keystone-cron-29424061-884r9\" (UID: \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\") " pod="openstack/keystone-cron-29424061-884r9" Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.263309 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7srtp\" (UniqueName: \"kubernetes.io/projected/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-kube-api-access-7srtp\") pod \"keystone-cron-29424061-884r9\" (UID: \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\") " pod="openstack/keystone-cron-29424061-884r9" Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.263515 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-config-data\") pod \"keystone-cron-29424061-884r9\" (UID: \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\") " pod="openstack/keystone-cron-29424061-884r9" Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.365933 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-config-data\") pod \"keystone-cron-29424061-884r9\" (UID: \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\") " pod="openstack/keystone-cron-29424061-884r9" Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.366056 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-fernet-keys\") pod \"keystone-cron-29424061-884r9\" (UID: \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\") " pod="openstack/keystone-cron-29424061-884r9" Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.366186 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-combined-ca-bundle\") pod \"keystone-cron-29424061-884r9\" (UID: \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\") " pod="openstack/keystone-cron-29424061-884r9" Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.366282 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7srtp\" (UniqueName: \"kubernetes.io/projected/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-kube-api-access-7srtp\") pod \"keystone-cron-29424061-884r9\" (UID: \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\") " pod="openstack/keystone-cron-29424061-884r9" Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.372799 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-config-data\") pod \"keystone-cron-29424061-884r9\" (UID: \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\") " pod="openstack/keystone-cron-29424061-884r9" Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.372919 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-combined-ca-bundle\") pod \"keystone-cron-29424061-884r9\" (UID: \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\") " pod="openstack/keystone-cron-29424061-884r9" Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.376464 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-fernet-keys\") pod \"keystone-cron-29424061-884r9\" (UID: \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\") " pod="openstack/keystone-cron-29424061-884r9" Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.393003 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7srtp\" (UniqueName: \"kubernetes.io/projected/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-kube-api-access-7srtp\") pod \"keystone-cron-29424061-884r9\" (UID: \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\") " pod="openstack/keystone-cron-29424061-884r9" Dec 11 09:01:00 crc kubenswrapper[4881]: I1211 09:01:00.520592 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29424061-884r9" Dec 11 09:01:01 crc kubenswrapper[4881]: I1211 09:01:01.018207 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29424061-884r9"] Dec 11 09:01:01 crc kubenswrapper[4881]: I1211 09:01:01.744900 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29424061-884r9" event={"ID":"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a","Type":"ContainerStarted","Data":"e0184cece939ad7c8405617c26b4c543ed6944c47f52b043b275e7317e2eeba7"} Dec 11 09:01:01 crc kubenswrapper[4881]: I1211 09:01:01.745231 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29424061-884r9" event={"ID":"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a","Type":"ContainerStarted","Data":"6cfadd33a044f35a54b7f290fc23080fc9572d0a1567001bdc254da6c4f5ca3a"} Dec 11 09:01:01 crc kubenswrapper[4881]: I1211 09:01:01.759939 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29424061-884r9" podStartSLOduration=1.759917948 podStartE2EDuration="1.759917948s" podCreationTimestamp="2025-12-11 09:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 09:01:01.757702733 +0000 UTC m=+2710.135071430" watchObservedRunningTime="2025-12-11 09:01:01.759917948 +0000 UTC m=+2710.137286645" Dec 11 09:01:05 crc kubenswrapper[4881]: I1211 09:01:05.790784 4881 generic.go:334] "Generic (PLEG): container finished" podID="5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a" containerID="e0184cece939ad7c8405617c26b4c543ed6944c47f52b043b275e7317e2eeba7" exitCode=0 Dec 11 09:01:05 crc kubenswrapper[4881]: I1211 09:01:05.790894 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29424061-884r9" event={"ID":"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a","Type":"ContainerDied","Data":"e0184cece939ad7c8405617c26b4c543ed6944c47f52b043b275e7317e2eeba7"} Dec 11 09:01:07 crc kubenswrapper[4881]: I1211 09:01:07.220141 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29424061-884r9" Dec 11 09:01:07 crc kubenswrapper[4881]: I1211 09:01:07.370354 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-combined-ca-bundle\") pod \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\" (UID: \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\") " Dec 11 09:01:07 crc kubenswrapper[4881]: I1211 09:01:07.370765 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-config-data\") pod \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\" (UID: \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\") " Dec 11 09:01:07 crc kubenswrapper[4881]: I1211 09:01:07.370822 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-fernet-keys\") pod \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\" (UID: \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\") " Dec 11 09:01:07 crc kubenswrapper[4881]: I1211 09:01:07.370939 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7srtp\" (UniqueName: \"kubernetes.io/projected/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-kube-api-access-7srtp\") pod \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\" (UID: \"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a\") " Dec 11 09:01:07 crc kubenswrapper[4881]: I1211 09:01:07.376781 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-kube-api-access-7srtp" (OuterVolumeSpecName: "kube-api-access-7srtp") pod "5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a" (UID: "5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a"). InnerVolumeSpecName "kube-api-access-7srtp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:01:07 crc kubenswrapper[4881]: I1211 09:01:07.380898 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a" (UID: "5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:01:07 crc kubenswrapper[4881]: I1211 09:01:07.417797 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a" (UID: "5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:01:07 crc kubenswrapper[4881]: I1211 09:01:07.454514 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-config-data" (OuterVolumeSpecName: "config-data") pod "5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a" (UID: "5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:01:07 crc kubenswrapper[4881]: I1211 09:01:07.473523 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 09:01:07 crc kubenswrapper[4881]: I1211 09:01:07.473557 4881 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 11 09:01:07 crc kubenswrapper[4881]: I1211 09:01:07.473567 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7srtp\" (UniqueName: \"kubernetes.io/projected/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-kube-api-access-7srtp\") on node \"crc\" DevicePath \"\"" Dec 11 09:01:07 crc kubenswrapper[4881]: I1211 09:01:07.473579 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 09:01:07 crc kubenswrapper[4881]: I1211 09:01:07.842651 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29424061-884r9" event={"ID":"5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a","Type":"ContainerDied","Data":"6cfadd33a044f35a54b7f290fc23080fc9572d0a1567001bdc254da6c4f5ca3a"} Dec 11 09:01:07 crc kubenswrapper[4881]: I1211 09:01:07.842717 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6cfadd33a044f35a54b7f290fc23080fc9572d0a1567001bdc254da6c4f5ca3a" Dec 11 09:01:07 crc kubenswrapper[4881]: I1211 09:01:07.842686 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29424061-884r9" Dec 11 09:01:34 crc kubenswrapper[4881]: I1211 09:01:34.843492 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9rdcd"] Dec 11 09:01:34 crc kubenswrapper[4881]: E1211 09:01:34.844737 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a" containerName="keystone-cron" Dec 11 09:01:34 crc kubenswrapper[4881]: I1211 09:01:34.844757 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a" containerName="keystone-cron" Dec 11 09:01:34 crc kubenswrapper[4881]: I1211 09:01:34.845098 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a" containerName="keystone-cron" Dec 11 09:01:34 crc kubenswrapper[4881]: I1211 09:01:34.847367 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9rdcd" Dec 11 09:01:34 crc kubenswrapper[4881]: I1211 09:01:34.857183 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9rdcd"] Dec 11 09:01:34 crc kubenswrapper[4881]: I1211 09:01:34.926169 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64249227-05ce-47f7-b54a-534e168e1e54-utilities\") pod \"redhat-operators-9rdcd\" (UID: \"64249227-05ce-47f7-b54a-534e168e1e54\") " pod="openshift-marketplace/redhat-operators-9rdcd" Dec 11 09:01:34 crc kubenswrapper[4881]: I1211 09:01:34.926570 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvn2c\" (UniqueName: \"kubernetes.io/projected/64249227-05ce-47f7-b54a-534e168e1e54-kube-api-access-zvn2c\") pod \"redhat-operators-9rdcd\" (UID: \"64249227-05ce-47f7-b54a-534e168e1e54\") " pod="openshift-marketplace/redhat-operators-9rdcd" Dec 11 09:01:34 crc kubenswrapper[4881]: I1211 09:01:34.926753 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64249227-05ce-47f7-b54a-534e168e1e54-catalog-content\") pod \"redhat-operators-9rdcd\" (UID: \"64249227-05ce-47f7-b54a-534e168e1e54\") " pod="openshift-marketplace/redhat-operators-9rdcd" Dec 11 09:01:35 crc kubenswrapper[4881]: I1211 09:01:35.029754 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64249227-05ce-47f7-b54a-534e168e1e54-utilities\") pod \"redhat-operators-9rdcd\" (UID: \"64249227-05ce-47f7-b54a-534e168e1e54\") " pod="openshift-marketplace/redhat-operators-9rdcd" Dec 11 09:01:35 crc kubenswrapper[4881]: I1211 09:01:35.029811 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvn2c\" (UniqueName: \"kubernetes.io/projected/64249227-05ce-47f7-b54a-534e168e1e54-kube-api-access-zvn2c\") pod \"redhat-operators-9rdcd\" (UID: \"64249227-05ce-47f7-b54a-534e168e1e54\") " pod="openshift-marketplace/redhat-operators-9rdcd" Dec 11 09:01:35 crc kubenswrapper[4881]: I1211 09:01:35.030137 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64249227-05ce-47f7-b54a-534e168e1e54-catalog-content\") pod \"redhat-operators-9rdcd\" (UID: \"64249227-05ce-47f7-b54a-534e168e1e54\") " pod="openshift-marketplace/redhat-operators-9rdcd" Dec 11 09:01:35 crc kubenswrapper[4881]: I1211 09:01:35.030272 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64249227-05ce-47f7-b54a-534e168e1e54-utilities\") pod \"redhat-operators-9rdcd\" (UID: \"64249227-05ce-47f7-b54a-534e168e1e54\") " pod="openshift-marketplace/redhat-operators-9rdcd" Dec 11 09:01:35 crc kubenswrapper[4881]: I1211 09:01:35.030541 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64249227-05ce-47f7-b54a-534e168e1e54-catalog-content\") pod \"redhat-operators-9rdcd\" (UID: \"64249227-05ce-47f7-b54a-534e168e1e54\") " pod="openshift-marketplace/redhat-operators-9rdcd" Dec 11 09:01:35 crc kubenswrapper[4881]: I1211 09:01:35.051511 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-zvn2c\" (UniqueName: \"kubernetes.io/projected/64249227-05ce-47f7-b54a-534e168e1e54-kube-api-access-zvn2c\") pod \"redhat-operators-9rdcd\" (UID: \"64249227-05ce-47f7-b54a-534e168e1e54\") " pod="openshift-marketplace/redhat-operators-9rdcd" Dec 11 09:01:35 crc kubenswrapper[4881]: I1211 09:01:35.193214 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9rdcd" Dec 11 09:01:35 crc kubenswrapper[4881]: I1211 09:01:35.813947 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9rdcd"] Dec 11 09:01:36 crc kubenswrapper[4881]: I1211 09:01:36.154180 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9rdcd" event={"ID":"64249227-05ce-47f7-b54a-534e168e1e54","Type":"ContainerStarted","Data":"56bcb40faf54621712de3ea4568411b9f89c9f6dd0a7bbb81b29b9c3fdeec7ed"} Dec 11 09:01:37 crc kubenswrapper[4881]: I1211 09:01:37.165874 4881 generic.go:334] "Generic (PLEG): container finished" podID="64249227-05ce-47f7-b54a-534e168e1e54" containerID="0463e93a886a189c56684b8e8bcf1a03d38092484df329a5d7a86a4f9f127f84" exitCode=0 Dec 11 09:01:37 crc kubenswrapper[4881]: I1211 09:01:37.166140 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9rdcd" event={"ID":"64249227-05ce-47f7-b54a-534e168e1e54","Type":"ContainerDied","Data":"0463e93a886a189c56684b8e8bcf1a03d38092484df329a5d7a86a4f9f127f84"} Dec 11 09:01:41 crc kubenswrapper[4881]: I1211 09:01:41.215806 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9rdcd" event={"ID":"64249227-05ce-47f7-b54a-534e168e1e54","Type":"ContainerStarted","Data":"3185e697563342b2cd8e2e5e09e2e6f7ce7e4d6449bb72b8e41ad7765c222570"} Dec 11 09:01:51 crc kubenswrapper[4881]: I1211 09:01:51.796446 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="b51fa237-35ec-47d6-b61d-c3e50dc8450f" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Dec 11 09:01:55 crc kubenswrapper[4881]: I1211 09:01:55.380702 4881 generic.go:334] "Generic (PLEG): container finished" podID="64249227-05ce-47f7-b54a-534e168e1e54" containerID="3185e697563342b2cd8e2e5e09e2e6f7ce7e4d6449bb72b8e41ad7765c222570" exitCode=0 Dec 11 09:01:55 crc kubenswrapper[4881]: I1211 09:01:55.380808 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9rdcd" event={"ID":"64249227-05ce-47f7-b54a-534e168e1e54","Type":"ContainerDied","Data":"3185e697563342b2cd8e2e5e09e2e6f7ce7e4d6449bb72b8e41ad7765c222570"} Dec 11 09:02:01 crc kubenswrapper[4881]: I1211 09:02:01.448472 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9rdcd" event={"ID":"64249227-05ce-47f7-b54a-534e168e1e54","Type":"ContainerStarted","Data":"c483ca9d565828fc45f4e90e72e1016663e4419279d51aab886f747e759c8d05"} Dec 11 09:02:01 crc kubenswrapper[4881]: I1211 09:02:01.469245 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9rdcd" podStartSLOduration=4.43648369 podStartE2EDuration="27.469225921s" podCreationTimestamp="2025-12-11 09:01:34 +0000 UTC" firstStartedPulling="2025-12-11 09:01:37.168058228 +0000 UTC m=+2745.545426925" lastFinishedPulling="2025-12-11 09:02:00.200800459 +0000 UTC m=+2768.578169156" observedRunningTime="2025-12-11 09:02:01.464889743 +0000 UTC 
m=+2769.842258450" watchObservedRunningTime="2025-12-11 09:02:01.469225921 +0000 UTC m=+2769.846594618" Dec 11 09:02:05 crc kubenswrapper[4881]: I1211 09:02:05.194039 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9rdcd" Dec 11 09:02:05 crc kubenswrapper[4881]: I1211 09:02:05.194491 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9rdcd" Dec 11 09:02:06 crc kubenswrapper[4881]: I1211 09:02:06.252464 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9rdcd" podUID="64249227-05ce-47f7-b54a-534e168e1e54" containerName="registry-server" probeResult="failure" output=< Dec 11 09:02:06 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 09:02:06 crc kubenswrapper[4881]: > Dec 11 09:02:10 crc kubenswrapper[4881]: I1211 09:02:10.849029 4881 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.266840292s: [/var/lib/containers/storage/overlay/4db10c2b8f0abb87978f22e6a22ab020059f5d419d7c760f716292e5ef127e1b/diff /var/log/pods/openshift-nmstate_nmstate-operator-6769fb99d-g9bf9_ae9ad369-6e2e-4c6c-a12a-cf228edaa48c/nmstate-operator/0.log]; will not log again for this container unless duration exceeds 2s Dec 11 09:02:15 crc kubenswrapper[4881]: I1211 09:02:15.238697 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9rdcd" Dec 11 09:02:15 crc kubenswrapper[4881]: I1211 09:02:15.291039 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9rdcd" Dec 11 09:02:15 crc kubenswrapper[4881]: I1211 09:02:15.477158 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9rdcd"] Dec 11 09:02:16 crc kubenswrapper[4881]: I1211 09:02:16.962898 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9rdcd" podUID="64249227-05ce-47f7-b54a-534e168e1e54" containerName="registry-server" containerID="cri-o://c483ca9d565828fc45f4e90e72e1016663e4419279d51aab886f747e759c8d05" gracePeriod=2 Dec 11 09:02:17 crc kubenswrapper[4881]: I1211 09:02:17.517932 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9rdcd" Dec 11 09:02:17 crc kubenswrapper[4881]: I1211 09:02:17.690269 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64249227-05ce-47f7-b54a-534e168e1e54-utilities\") pod \"64249227-05ce-47f7-b54a-534e168e1e54\" (UID: \"64249227-05ce-47f7-b54a-534e168e1e54\") " Dec 11 09:02:17 crc kubenswrapper[4881]: I1211 09:02:17.690463 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64249227-05ce-47f7-b54a-534e168e1e54-catalog-content\") pod \"64249227-05ce-47f7-b54a-534e168e1e54\" (UID: \"64249227-05ce-47f7-b54a-534e168e1e54\") " Dec 11 09:02:17 crc kubenswrapper[4881]: I1211 09:02:17.690539 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvn2c\" (UniqueName: \"kubernetes.io/projected/64249227-05ce-47f7-b54a-534e168e1e54-kube-api-access-zvn2c\") pod \"64249227-05ce-47f7-b54a-534e168e1e54\" (UID: \"64249227-05ce-47f7-b54a-534e168e1e54\") " Dec 11 09:02:17 crc kubenswrapper[4881]: I1211 09:02:17.691004 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64249227-05ce-47f7-b54a-534e168e1e54-utilities" (OuterVolumeSpecName: "utilities") pod "64249227-05ce-47f7-b54a-534e168e1e54" (UID: "64249227-05ce-47f7-b54a-534e168e1e54"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:02:17 crc kubenswrapper[4881]: I1211 09:02:17.691300 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64249227-05ce-47f7-b54a-534e168e1e54-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:02:17 crc kubenswrapper[4881]: I1211 09:02:17.696834 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64249227-05ce-47f7-b54a-534e168e1e54-kube-api-access-zvn2c" (OuterVolumeSpecName: "kube-api-access-zvn2c") pod "64249227-05ce-47f7-b54a-534e168e1e54" (UID: "64249227-05ce-47f7-b54a-534e168e1e54"). InnerVolumeSpecName "kube-api-access-zvn2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:02:17 crc kubenswrapper[4881]: I1211 09:02:17.793900 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvn2c\" (UniqueName: \"kubernetes.io/projected/64249227-05ce-47f7-b54a-534e168e1e54-kube-api-access-zvn2c\") on node \"crc\" DevicePath \"\"" Dec 11 09:02:17 crc kubenswrapper[4881]: I1211 09:02:17.817701 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64249227-05ce-47f7-b54a-534e168e1e54-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "64249227-05ce-47f7-b54a-534e168e1e54" (UID: "64249227-05ce-47f7-b54a-534e168e1e54"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:02:17 crc kubenswrapper[4881]: I1211 09:02:17.895996 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64249227-05ce-47f7-b54a-534e168e1e54-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:02:17 crc kubenswrapper[4881]: I1211 09:02:17.974842 4881 generic.go:334] "Generic (PLEG): container finished" podID="64249227-05ce-47f7-b54a-534e168e1e54" containerID="c483ca9d565828fc45f4e90e72e1016663e4419279d51aab886f747e759c8d05" exitCode=0 Dec 11 09:02:17 crc kubenswrapper[4881]: I1211 09:02:17.974889 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9rdcd" event={"ID":"64249227-05ce-47f7-b54a-534e168e1e54","Type":"ContainerDied","Data":"c483ca9d565828fc45f4e90e72e1016663e4419279d51aab886f747e759c8d05"} Dec 11 09:02:17 crc kubenswrapper[4881]: I1211 09:02:17.974921 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9rdcd" event={"ID":"64249227-05ce-47f7-b54a-534e168e1e54","Type":"ContainerDied","Data":"56bcb40faf54621712de3ea4568411b9f89c9f6dd0a7bbb81b29b9c3fdeec7ed"} Dec 11 09:02:17 crc kubenswrapper[4881]: I1211 09:02:17.974940 4881 scope.go:117] "RemoveContainer" containerID="c483ca9d565828fc45f4e90e72e1016663e4419279d51aab886f747e759c8d05" Dec 11 09:02:17 crc kubenswrapper[4881]: I1211 09:02:17.974946 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9rdcd" Dec 11 09:02:18 crc kubenswrapper[4881]: I1211 09:02:18.000817 4881 scope.go:117] "RemoveContainer" containerID="3185e697563342b2cd8e2e5e09e2e6f7ce7e4d6449bb72b8e41ad7765c222570" Dec 11 09:02:18 crc kubenswrapper[4881]: I1211 09:02:18.016489 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9rdcd"] Dec 11 09:02:18 crc kubenswrapper[4881]: I1211 09:02:18.026940 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9rdcd"] Dec 11 09:02:18 crc kubenswrapper[4881]: I1211 09:02:18.039955 4881 scope.go:117] "RemoveContainer" containerID="0463e93a886a189c56684b8e8bcf1a03d38092484df329a5d7a86a4f9f127f84" Dec 11 09:02:18 crc kubenswrapper[4881]: I1211 09:02:18.090233 4881 scope.go:117] "RemoveContainer" containerID="c483ca9d565828fc45f4e90e72e1016663e4419279d51aab886f747e759c8d05" Dec 11 09:02:18 crc kubenswrapper[4881]: E1211 09:02:18.090764 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c483ca9d565828fc45f4e90e72e1016663e4419279d51aab886f747e759c8d05\": container with ID starting with c483ca9d565828fc45f4e90e72e1016663e4419279d51aab886f747e759c8d05 not found: ID does not exist" containerID="c483ca9d565828fc45f4e90e72e1016663e4419279d51aab886f747e759c8d05" Dec 11 09:02:18 crc kubenswrapper[4881]: I1211 09:02:18.090812 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c483ca9d565828fc45f4e90e72e1016663e4419279d51aab886f747e759c8d05"} err="failed to get container status \"c483ca9d565828fc45f4e90e72e1016663e4419279d51aab886f747e759c8d05\": rpc error: code = NotFound desc = could not find container \"c483ca9d565828fc45f4e90e72e1016663e4419279d51aab886f747e759c8d05\": container with ID starting with c483ca9d565828fc45f4e90e72e1016663e4419279d51aab886f747e759c8d05 not found: ID does not exist" Dec 11 09:02:18 crc 
kubenswrapper[4881]: I1211 09:02:18.090841 4881 scope.go:117] "RemoveContainer" containerID="3185e697563342b2cd8e2e5e09e2e6f7ce7e4d6449bb72b8e41ad7765c222570" Dec 11 09:02:18 crc kubenswrapper[4881]: E1211 09:02:18.091191 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3185e697563342b2cd8e2e5e09e2e6f7ce7e4d6449bb72b8e41ad7765c222570\": container with ID starting with 3185e697563342b2cd8e2e5e09e2e6f7ce7e4d6449bb72b8e41ad7765c222570 not found: ID does not exist" containerID="3185e697563342b2cd8e2e5e09e2e6f7ce7e4d6449bb72b8e41ad7765c222570" Dec 11 09:02:18 crc kubenswrapper[4881]: I1211 09:02:18.091252 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3185e697563342b2cd8e2e5e09e2e6f7ce7e4d6449bb72b8e41ad7765c222570"} err="failed to get container status \"3185e697563342b2cd8e2e5e09e2e6f7ce7e4d6449bb72b8e41ad7765c222570\": rpc error: code = NotFound desc = could not find container \"3185e697563342b2cd8e2e5e09e2e6f7ce7e4d6449bb72b8e41ad7765c222570\": container with ID starting with 3185e697563342b2cd8e2e5e09e2e6f7ce7e4d6449bb72b8e41ad7765c222570 not found: ID does not exist" Dec 11 09:02:18 crc kubenswrapper[4881]: I1211 09:02:18.091284 4881 scope.go:117] "RemoveContainer" containerID="0463e93a886a189c56684b8e8bcf1a03d38092484df329a5d7a86a4f9f127f84" Dec 11 09:02:18 crc kubenswrapper[4881]: E1211 09:02:18.092099 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0463e93a886a189c56684b8e8bcf1a03d38092484df329a5d7a86a4f9f127f84\": container with ID starting with 0463e93a886a189c56684b8e8bcf1a03d38092484df329a5d7a86a4f9f127f84 not found: ID does not exist" containerID="0463e93a886a189c56684b8e8bcf1a03d38092484df329a5d7a86a4f9f127f84" Dec 11 09:02:18 crc kubenswrapper[4881]: I1211 09:02:18.092147 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0463e93a886a189c56684b8e8bcf1a03d38092484df329a5d7a86a4f9f127f84"} err="failed to get container status \"0463e93a886a189c56684b8e8bcf1a03d38092484df329a5d7a86a4f9f127f84\": rpc error: code = NotFound desc = could not find container \"0463e93a886a189c56684b8e8bcf1a03d38092484df329a5d7a86a4f9f127f84\": container with ID starting with 0463e93a886a189c56684b8e8bcf1a03d38092484df329a5d7a86a4f9f127f84 not found: ID does not exist" Dec 11 09:02:19 crc kubenswrapper[4881]: I1211 09:02:19.017797 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64249227-05ce-47f7-b54a-534e168e1e54" path="/var/lib/kubelet/pods/64249227-05ce-47f7-b54a-534e168e1e54/volumes" Dec 11 09:02:30 crc kubenswrapper[4881]: I1211 09:02:30.126223 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-r42tp"] Dec 11 09:02:30 crc kubenswrapper[4881]: E1211 09:02:30.127272 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64249227-05ce-47f7-b54a-534e168e1e54" containerName="registry-server" Dec 11 09:02:30 crc kubenswrapper[4881]: I1211 09:02:30.127285 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="64249227-05ce-47f7-b54a-534e168e1e54" containerName="registry-server" Dec 11 09:02:30 crc kubenswrapper[4881]: E1211 09:02:30.127322 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64249227-05ce-47f7-b54a-534e168e1e54" containerName="extract-content" Dec 11 09:02:30 crc kubenswrapper[4881]: I1211 09:02:30.127329 4881 
state_mem.go:107] "Deleted CPUSet assignment" podUID="64249227-05ce-47f7-b54a-534e168e1e54" containerName="extract-content" Dec 11 09:02:30 crc kubenswrapper[4881]: E1211 09:02:30.127372 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64249227-05ce-47f7-b54a-534e168e1e54" containerName="extract-utilities" Dec 11 09:02:30 crc kubenswrapper[4881]: I1211 09:02:30.127379 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="64249227-05ce-47f7-b54a-534e168e1e54" containerName="extract-utilities" Dec 11 09:02:30 crc kubenswrapper[4881]: I1211 09:02:30.130207 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="64249227-05ce-47f7-b54a-534e168e1e54" containerName="registry-server" Dec 11 09:02:30 crc kubenswrapper[4881]: I1211 09:02:30.131993 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r42tp" Dec 11 09:02:30 crc kubenswrapper[4881]: I1211 09:02:30.152310 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-r42tp"] Dec 11 09:02:30 crc kubenswrapper[4881]: I1211 09:02:30.188993 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bm7cg\" (UniqueName: \"kubernetes.io/projected/c2e04acf-3c88-4eda-99ff-19ca10380e6d-kube-api-access-bm7cg\") pod \"redhat-marketplace-r42tp\" (UID: \"c2e04acf-3c88-4eda-99ff-19ca10380e6d\") " pod="openshift-marketplace/redhat-marketplace-r42tp" Dec 11 09:02:30 crc kubenswrapper[4881]: I1211 09:02:30.189070 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2e04acf-3c88-4eda-99ff-19ca10380e6d-catalog-content\") pod \"redhat-marketplace-r42tp\" (UID: \"c2e04acf-3c88-4eda-99ff-19ca10380e6d\") " pod="openshift-marketplace/redhat-marketplace-r42tp" Dec 11 09:02:30 crc kubenswrapper[4881]: I1211 09:02:30.189134 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2e04acf-3c88-4eda-99ff-19ca10380e6d-utilities\") pod \"redhat-marketplace-r42tp\" (UID: \"c2e04acf-3c88-4eda-99ff-19ca10380e6d\") " pod="openshift-marketplace/redhat-marketplace-r42tp" Dec 11 09:02:30 crc kubenswrapper[4881]: I1211 09:02:30.291511 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bm7cg\" (UniqueName: \"kubernetes.io/projected/c2e04acf-3c88-4eda-99ff-19ca10380e6d-kube-api-access-bm7cg\") pod \"redhat-marketplace-r42tp\" (UID: \"c2e04acf-3c88-4eda-99ff-19ca10380e6d\") " pod="openshift-marketplace/redhat-marketplace-r42tp" Dec 11 09:02:30 crc kubenswrapper[4881]: I1211 09:02:30.291579 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2e04acf-3c88-4eda-99ff-19ca10380e6d-catalog-content\") pod \"redhat-marketplace-r42tp\" (UID: \"c2e04acf-3c88-4eda-99ff-19ca10380e6d\") " pod="openshift-marketplace/redhat-marketplace-r42tp" Dec 11 09:02:30 crc kubenswrapper[4881]: I1211 09:02:30.291639 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2e04acf-3c88-4eda-99ff-19ca10380e6d-utilities\") pod \"redhat-marketplace-r42tp\" (UID: \"c2e04acf-3c88-4eda-99ff-19ca10380e6d\") " pod="openshift-marketplace/redhat-marketplace-r42tp" Dec 11 09:02:30 crc 
kubenswrapper[4881]: I1211 09:02:30.292115 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2e04acf-3c88-4eda-99ff-19ca10380e6d-utilities\") pod \"redhat-marketplace-r42tp\" (UID: \"c2e04acf-3c88-4eda-99ff-19ca10380e6d\") " pod="openshift-marketplace/redhat-marketplace-r42tp" Dec 11 09:02:30 crc kubenswrapper[4881]: I1211 09:02:30.292645 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2e04acf-3c88-4eda-99ff-19ca10380e6d-catalog-content\") pod \"redhat-marketplace-r42tp\" (UID: \"c2e04acf-3c88-4eda-99ff-19ca10380e6d\") " pod="openshift-marketplace/redhat-marketplace-r42tp" Dec 11 09:02:30 crc kubenswrapper[4881]: I1211 09:02:30.321064 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bm7cg\" (UniqueName: \"kubernetes.io/projected/c2e04acf-3c88-4eda-99ff-19ca10380e6d-kube-api-access-bm7cg\") pod \"redhat-marketplace-r42tp\" (UID: \"c2e04acf-3c88-4eda-99ff-19ca10380e6d\") " pod="openshift-marketplace/redhat-marketplace-r42tp" Dec 11 09:02:30 crc kubenswrapper[4881]: I1211 09:02:30.461568 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r42tp" Dec 11 09:02:31 crc kubenswrapper[4881]: I1211 09:02:31.083827 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-r42tp"] Dec 11 09:02:31 crc kubenswrapper[4881]: I1211 09:02:31.114105 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r42tp" event={"ID":"c2e04acf-3c88-4eda-99ff-19ca10380e6d","Type":"ContainerStarted","Data":"33a6015fd9760bb250ba01ecf968bc5f8f0c1dd3b96885ac9aeb9bf8d4906faa"} Dec 11 09:02:32 crc kubenswrapper[4881]: I1211 09:02:32.125712 4881 generic.go:334] "Generic (PLEG): container finished" podID="c2e04acf-3c88-4eda-99ff-19ca10380e6d" containerID="1fcd302d7161f8153e71b25c498651bcec077dc531d853839a7decaa73a1f3c7" exitCode=0 Dec 11 09:02:32 crc kubenswrapper[4881]: I1211 09:02:32.125827 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r42tp" event={"ID":"c2e04acf-3c88-4eda-99ff-19ca10380e6d","Type":"ContainerDied","Data":"1fcd302d7161f8153e71b25c498651bcec077dc531d853839a7decaa73a1f3c7"} Dec 11 09:02:34 crc kubenswrapper[4881]: I1211 09:02:34.152654 4881 generic.go:334] "Generic (PLEG): container finished" podID="c2e04acf-3c88-4eda-99ff-19ca10380e6d" containerID="b669eeab8db96a04c482158e467b73741ce60946df6ffd352900c22c5dfdb243" exitCode=0 Dec 11 09:02:34 crc kubenswrapper[4881]: I1211 09:02:34.152791 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r42tp" event={"ID":"c2e04acf-3c88-4eda-99ff-19ca10380e6d","Type":"ContainerDied","Data":"b669eeab8db96a04c482158e467b73741ce60946df6ffd352900c22c5dfdb243"} Dec 11 09:02:35 crc kubenswrapper[4881]: I1211 09:02:35.174763 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r42tp" event={"ID":"c2e04acf-3c88-4eda-99ff-19ca10380e6d","Type":"ContainerStarted","Data":"4a752e8c1361ba0760b863b1e79f26008b0436205a4c32497d3c466427f5aca7"} Dec 11 09:02:35 crc kubenswrapper[4881]: I1211 09:02:35.202773 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-r42tp" podStartSLOduration=2.752312593 
podStartE2EDuration="5.202755281s" podCreationTimestamp="2025-12-11 09:02:30 +0000 UTC" firstStartedPulling="2025-12-11 09:02:32.128682453 +0000 UTC m=+2800.506051150" lastFinishedPulling="2025-12-11 09:02:34.579125141 +0000 UTC m=+2802.956493838" observedRunningTime="2025-12-11 09:02:35.192545463 +0000 UTC m=+2803.569914160" watchObservedRunningTime="2025-12-11 09:02:35.202755281 +0000 UTC m=+2803.580123978" Dec 11 09:02:39 crc kubenswrapper[4881]: I1211 09:02:39.215643 4881 generic.go:334] "Generic (PLEG): container finished" podID="1ad81113-10d1-4110-81ad-abd39146b84c" containerID="189884015e0984650d0da1ddc1b86df1c1e1dbaefdf021ff5c5afcc3d9a5649d" exitCode=0 Dec 11 09:02:39 crc kubenswrapper[4881]: I1211 09:02:39.215697 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" event={"ID":"1ad81113-10d1-4110-81ad-abd39146b84c","Type":"ContainerDied","Data":"189884015e0984650d0da1ddc1b86df1c1e1dbaefdf021ff5c5afcc3d9a5649d"} Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.468701 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-r42tp" Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.469009 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-r42tp" Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.522142 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-r42tp" Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.808998 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.866466 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-ssh-key\") pod \"1ad81113-10d1-4110-81ad-abd39146b84c\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.866708 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gml4c\" (UniqueName: \"kubernetes.io/projected/1ad81113-10d1-4110-81ad-abd39146b84c-kube-api-access-gml4c\") pod \"1ad81113-10d1-4110-81ad-abd39146b84c\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.866846 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-inventory\") pod \"1ad81113-10d1-4110-81ad-abd39146b84c\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.867374 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-libvirt-secret-0\") pod \"1ad81113-10d1-4110-81ad-abd39146b84c\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") " Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.867423 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-libvirt-combined-ca-bundle\") pod \"1ad81113-10d1-4110-81ad-abd39146b84c\" (UID: \"1ad81113-10d1-4110-81ad-abd39146b84c\") 
" Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.872504 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "1ad81113-10d1-4110-81ad-abd39146b84c" (UID: "1ad81113-10d1-4110-81ad-abd39146b84c"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.872750 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ad81113-10d1-4110-81ad-abd39146b84c-kube-api-access-gml4c" (OuterVolumeSpecName: "kube-api-access-gml4c") pod "1ad81113-10d1-4110-81ad-abd39146b84c" (UID: "1ad81113-10d1-4110-81ad-abd39146b84c"). InnerVolumeSpecName "kube-api-access-gml4c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.900152 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-inventory" (OuterVolumeSpecName: "inventory") pod "1ad81113-10d1-4110-81ad-abd39146b84c" (UID: "1ad81113-10d1-4110-81ad-abd39146b84c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.901369 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1ad81113-10d1-4110-81ad-abd39146b84c" (UID: "1ad81113-10d1-4110-81ad-abd39146b84c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.906097 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "1ad81113-10d1-4110-81ad-abd39146b84c" (UID: "1ad81113-10d1-4110-81ad-abd39146b84c"). InnerVolumeSpecName "libvirt-secret-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.970640 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.970696 4881 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.970715 4881 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.970728 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1ad81113-10d1-4110-81ad-abd39146b84c-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 09:02:40 crc kubenswrapper[4881]: I1211 09:02:40.970740 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gml4c\" (UniqueName: \"kubernetes.io/projected/1ad81113-10d1-4110-81ad-abd39146b84c-kube-api-access-gml4c\") on node \"crc\" DevicePath \"\"" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.236527 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.236549 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc" event={"ID":"1ad81113-10d1-4110-81ad-abd39146b84c","Type":"ContainerDied","Data":"839a09b53d580a3a22e167c904d4d6d50bf1d28f7be53fc21dbf525af82fea0a"} Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.236607 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="839a09b53d580a3a22e167c904d4d6d50bf1d28f7be53fc21dbf525af82fea0a" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.299718 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-r42tp" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.375515 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p"] Dec 11 09:02:41 crc kubenswrapper[4881]: E1211 09:02:41.376162 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ad81113-10d1-4110-81ad-abd39146b84c" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.376181 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ad81113-10d1-4110-81ad-abd39146b84c" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.376546 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ad81113-10d1-4110-81ad-abd39146b84c" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.377622 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.401485 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.401835 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.401486 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.402006 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.402041 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.403300 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.403899 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.414744 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p"] Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.427866 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-r42tp"] Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.480991 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7f9v9\" (UniqueName: \"kubernetes.io/projected/84b496b0-b36c-4ece-ba2d-e73423d502cd-kube-api-access-7f9v9\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.481357 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.481416 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.481621 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.481784 4881 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.482078 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.482219 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.482277 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.482369 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.587984 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.588130 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.588166 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 
09:02:41.588205 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.588266 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7f9v9\" (UniqueName: \"kubernetes.io/projected/84b496b0-b36c-4ece-ba2d-e73423d502cd-kube-api-access-7f9v9\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.588388 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.588428 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.588487 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.588567 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.589457 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.594109 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.594131 4881 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.594174 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.595198 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.595671 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.596954 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.603567 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.604103 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7f9v9\" (UniqueName: \"kubernetes.io/projected/84b496b0-b36c-4ece-ba2d-e73423d502cd-kube-api-access-7f9v9\") pod \"nova-edpm-deployment-openstack-edpm-ipam-hq67p\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:41 crc kubenswrapper[4881]: I1211 09:02:41.727520 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:02:42 crc kubenswrapper[4881]: I1211 09:02:42.337879 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p"] Dec 11 09:02:43 crc kubenswrapper[4881]: I1211 09:02:43.260979 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" event={"ID":"84b496b0-b36c-4ece-ba2d-e73423d502cd","Type":"ContainerStarted","Data":"8cde8be8bfd707f0d42581eb7394bc0dcc3d49a1f2337b8773d2fe24278bd720"} Dec 11 09:02:43 crc kubenswrapper[4881]: I1211 09:02:43.261148 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-r42tp" podUID="c2e04acf-3c88-4eda-99ff-19ca10380e6d" containerName="registry-server" containerID="cri-o://4a752e8c1361ba0760b863b1e79f26008b0436205a4c32497d3c466427f5aca7" gracePeriod=2 Dec 11 09:02:43 crc kubenswrapper[4881]: I1211 09:02:43.264247 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" event={"ID":"84b496b0-b36c-4ece-ba2d-e73423d502cd","Type":"ContainerStarted","Data":"845e7ff82816c408bfeddb7a497aa993f7a9cabd6e417cd72a0af955ab32e357"} Dec 11 09:02:43 crc kubenswrapper[4881]: I1211 09:02:43.280136 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" podStartSLOduration=1.809364042 podStartE2EDuration="2.280114451s" podCreationTimestamp="2025-12-11 09:02:41 +0000 UTC" firstStartedPulling="2025-12-11 09:02:42.33852715 +0000 UTC m=+2810.715895847" lastFinishedPulling="2025-12-11 09:02:42.809277559 +0000 UTC m=+2811.186646256" observedRunningTime="2025-12-11 09:02:43.27603042 +0000 UTC m=+2811.653399117" watchObservedRunningTime="2025-12-11 09:02:43.280114451 +0000 UTC m=+2811.657483148" Dec 11 09:02:43 crc kubenswrapper[4881]: I1211 09:02:43.781912 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r42tp" Dec 11 09:02:43 crc kubenswrapper[4881]: I1211 09:02:43.851520 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2e04acf-3c88-4eda-99ff-19ca10380e6d-utilities\") pod \"c2e04acf-3c88-4eda-99ff-19ca10380e6d\" (UID: \"c2e04acf-3c88-4eda-99ff-19ca10380e6d\") " Dec 11 09:02:43 crc kubenswrapper[4881]: I1211 09:02:43.851855 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bm7cg\" (UniqueName: \"kubernetes.io/projected/c2e04acf-3c88-4eda-99ff-19ca10380e6d-kube-api-access-bm7cg\") pod \"c2e04acf-3c88-4eda-99ff-19ca10380e6d\" (UID: \"c2e04acf-3c88-4eda-99ff-19ca10380e6d\") " Dec 11 09:02:43 crc kubenswrapper[4881]: I1211 09:02:43.852740 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2e04acf-3c88-4eda-99ff-19ca10380e6d-catalog-content\") pod \"c2e04acf-3c88-4eda-99ff-19ca10380e6d\" (UID: \"c2e04acf-3c88-4eda-99ff-19ca10380e6d\") " Dec 11 09:02:43 crc kubenswrapper[4881]: I1211 09:02:43.856808 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2e04acf-3c88-4eda-99ff-19ca10380e6d-kube-api-access-bm7cg" (OuterVolumeSpecName: "kube-api-access-bm7cg") pod "c2e04acf-3c88-4eda-99ff-19ca10380e6d" (UID: "c2e04acf-3c88-4eda-99ff-19ca10380e6d"). InnerVolumeSpecName "kube-api-access-bm7cg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:02:43 crc kubenswrapper[4881]: I1211 09:02:43.866549 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2e04acf-3c88-4eda-99ff-19ca10380e6d-utilities" (OuterVolumeSpecName: "utilities") pod "c2e04acf-3c88-4eda-99ff-19ca10380e6d" (UID: "c2e04acf-3c88-4eda-99ff-19ca10380e6d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:02:43 crc kubenswrapper[4881]: I1211 09:02:43.942921 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2e04acf-3c88-4eda-99ff-19ca10380e6d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c2e04acf-3c88-4eda-99ff-19ca10380e6d" (UID: "c2e04acf-3c88-4eda-99ff-19ca10380e6d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:02:43 crc kubenswrapper[4881]: I1211 09:02:43.955880 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bm7cg\" (UniqueName: \"kubernetes.io/projected/c2e04acf-3c88-4eda-99ff-19ca10380e6d-kube-api-access-bm7cg\") on node \"crc\" DevicePath \"\"" Dec 11 09:02:43 crc kubenswrapper[4881]: I1211 09:02:43.955932 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2e04acf-3c88-4eda-99ff-19ca10380e6d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:02:43 crc kubenswrapper[4881]: I1211 09:02:43.955947 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2e04acf-3c88-4eda-99ff-19ca10380e6d-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:02:44 crc kubenswrapper[4881]: I1211 09:02:44.340915 4881 generic.go:334] "Generic (PLEG): container finished" podID="c2e04acf-3c88-4eda-99ff-19ca10380e6d" containerID="4a752e8c1361ba0760b863b1e79f26008b0436205a4c32497d3c466427f5aca7" exitCode=0 Dec 11 09:02:44 crc kubenswrapper[4881]: I1211 09:02:44.342285 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r42tp" Dec 11 09:02:44 crc kubenswrapper[4881]: I1211 09:02:44.343444 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r42tp" event={"ID":"c2e04acf-3c88-4eda-99ff-19ca10380e6d","Type":"ContainerDied","Data":"4a752e8c1361ba0760b863b1e79f26008b0436205a4c32497d3c466427f5aca7"} Dec 11 09:02:44 crc kubenswrapper[4881]: I1211 09:02:44.343519 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r42tp" event={"ID":"c2e04acf-3c88-4eda-99ff-19ca10380e6d","Type":"ContainerDied","Data":"33a6015fd9760bb250ba01ecf968bc5f8f0c1dd3b96885ac9aeb9bf8d4906faa"} Dec 11 09:02:44 crc kubenswrapper[4881]: I1211 09:02:44.343540 4881 scope.go:117] "RemoveContainer" containerID="4a752e8c1361ba0760b863b1e79f26008b0436205a4c32497d3c466427f5aca7" Dec 11 09:02:44 crc kubenswrapper[4881]: I1211 09:02:44.384028 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-r42tp"] Dec 11 09:02:44 crc kubenswrapper[4881]: I1211 09:02:44.397746 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-r42tp"] Dec 11 09:02:44 crc kubenswrapper[4881]: I1211 09:02:44.413412 4881 scope.go:117] "RemoveContainer" containerID="b669eeab8db96a04c482158e467b73741ce60946df6ffd352900c22c5dfdb243" Dec 11 09:02:44 crc kubenswrapper[4881]: I1211 09:02:44.441199 4881 scope.go:117] "RemoveContainer" containerID="1fcd302d7161f8153e71b25c498651bcec077dc531d853839a7decaa73a1f3c7" Dec 11 09:02:44 crc kubenswrapper[4881]: I1211 09:02:44.486913 4881 scope.go:117] "RemoveContainer" containerID="4a752e8c1361ba0760b863b1e79f26008b0436205a4c32497d3c466427f5aca7" Dec 11 09:02:44 crc kubenswrapper[4881]: E1211 09:02:44.487474 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a752e8c1361ba0760b863b1e79f26008b0436205a4c32497d3c466427f5aca7\": container with ID starting with 4a752e8c1361ba0760b863b1e79f26008b0436205a4c32497d3c466427f5aca7 not found: ID does not exist" containerID="4a752e8c1361ba0760b863b1e79f26008b0436205a4c32497d3c466427f5aca7" Dec 11 09:02:44 crc kubenswrapper[4881]: I1211 09:02:44.487530 4881 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a752e8c1361ba0760b863b1e79f26008b0436205a4c32497d3c466427f5aca7"} err="failed to get container status \"4a752e8c1361ba0760b863b1e79f26008b0436205a4c32497d3c466427f5aca7\": rpc error: code = NotFound desc = could not find container \"4a752e8c1361ba0760b863b1e79f26008b0436205a4c32497d3c466427f5aca7\": container with ID starting with 4a752e8c1361ba0760b863b1e79f26008b0436205a4c32497d3c466427f5aca7 not found: ID does not exist" Dec 11 09:02:44 crc kubenswrapper[4881]: I1211 09:02:44.487562 4881 scope.go:117] "RemoveContainer" containerID="b669eeab8db96a04c482158e467b73741ce60946df6ffd352900c22c5dfdb243" Dec 11 09:02:44 crc kubenswrapper[4881]: E1211 09:02:44.488031 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b669eeab8db96a04c482158e467b73741ce60946df6ffd352900c22c5dfdb243\": container with ID starting with b669eeab8db96a04c482158e467b73741ce60946df6ffd352900c22c5dfdb243 not found: ID does not exist" containerID="b669eeab8db96a04c482158e467b73741ce60946df6ffd352900c22c5dfdb243" Dec 11 09:02:44 crc kubenswrapper[4881]: I1211 09:02:44.488148 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b669eeab8db96a04c482158e467b73741ce60946df6ffd352900c22c5dfdb243"} err="failed to get container status \"b669eeab8db96a04c482158e467b73741ce60946df6ffd352900c22c5dfdb243\": rpc error: code = NotFound desc = could not find container \"b669eeab8db96a04c482158e467b73741ce60946df6ffd352900c22c5dfdb243\": container with ID starting with b669eeab8db96a04c482158e467b73741ce60946df6ffd352900c22c5dfdb243 not found: ID does not exist" Dec 11 09:02:44 crc kubenswrapper[4881]: I1211 09:02:44.488239 4881 scope.go:117] "RemoveContainer" containerID="1fcd302d7161f8153e71b25c498651bcec077dc531d853839a7decaa73a1f3c7" Dec 11 09:02:44 crc kubenswrapper[4881]: E1211 09:02:44.488735 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fcd302d7161f8153e71b25c498651bcec077dc531d853839a7decaa73a1f3c7\": container with ID starting with 1fcd302d7161f8153e71b25c498651bcec077dc531d853839a7decaa73a1f3c7 not found: ID does not exist" containerID="1fcd302d7161f8153e71b25c498651bcec077dc531d853839a7decaa73a1f3c7" Dec 11 09:02:44 crc kubenswrapper[4881]: I1211 09:02:44.488759 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fcd302d7161f8153e71b25c498651bcec077dc531d853839a7decaa73a1f3c7"} err="failed to get container status \"1fcd302d7161f8153e71b25c498651bcec077dc531d853839a7decaa73a1f3c7\": rpc error: code = NotFound desc = could not find container \"1fcd302d7161f8153e71b25c498651bcec077dc531d853839a7decaa73a1f3c7\": container with ID starting with 1fcd302d7161f8153e71b25c498651bcec077dc531d853839a7decaa73a1f3c7 not found: ID does not exist" Dec 11 09:02:45 crc kubenswrapper[4881]: I1211 09:02:45.018913 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2e04acf-3c88-4eda-99ff-19ca10380e6d" path="/var/lib/kubelet/pods/c2e04acf-3c88-4eda-99ff-19ca10380e6d/volumes" Dec 11 09:02:59 crc kubenswrapper[4881]: I1211 09:02:59.397111 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:02:59 crc kubenswrapper[4881]: I1211 09:02:59.397603 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:03:29 crc kubenswrapper[4881]: I1211 09:03:29.396879 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:03:29 crc kubenswrapper[4881]: I1211 09:03:29.397320 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:03:59 crc kubenswrapper[4881]: I1211 09:03:59.397591 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:03:59 crc kubenswrapper[4881]: I1211 09:03:59.398245 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:03:59 crc kubenswrapper[4881]: I1211 09:03:59.398305 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 09:03:59 crc kubenswrapper[4881]: I1211 09:03:59.399236 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e510f860249c2369bd34a09d0c999610be3cd247e6b9eafae5e3404be20264de"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 09:03:59 crc kubenswrapper[4881]: I1211 09:03:59.399287 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://e510f860249c2369bd34a09d0c999610be3cd247e6b9eafae5e3404be20264de" gracePeriod=600 Dec 11 09:04:00 crc kubenswrapper[4881]: I1211 09:04:00.175719 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="e510f860249c2369bd34a09d0c999610be3cd247e6b9eafae5e3404be20264de" exitCode=0 Dec 11 09:04:00 crc kubenswrapper[4881]: I1211 09:04:00.176323 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" 
event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"e510f860249c2369bd34a09d0c999610be3cd247e6b9eafae5e3404be20264de"} Dec 11 09:04:00 crc kubenswrapper[4881]: I1211 09:04:00.176397 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1"} Dec 11 09:04:00 crc kubenswrapper[4881]: I1211 09:04:00.176422 4881 scope.go:117] "RemoveContainer" containerID="2c1b273a42ceaf987004a76e656f9e43f9531662645d97e9202713754e0053d2" Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.063802 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-prnd8"] Dec 11 09:04:07 crc kubenswrapper[4881]: E1211 09:04:07.065933 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2e04acf-3c88-4eda-99ff-19ca10380e6d" containerName="registry-server" Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.066033 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2e04acf-3c88-4eda-99ff-19ca10380e6d" containerName="registry-server" Dec 11 09:04:07 crc kubenswrapper[4881]: E1211 09:04:07.066130 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2e04acf-3c88-4eda-99ff-19ca10380e6d" containerName="extract-content" Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.066209 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2e04acf-3c88-4eda-99ff-19ca10380e6d" containerName="extract-content" Dec 11 09:04:07 crc kubenswrapper[4881]: E1211 09:04:07.066622 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2e04acf-3c88-4eda-99ff-19ca10380e6d" containerName="extract-utilities" Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.066719 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2e04acf-3c88-4eda-99ff-19ca10380e6d" containerName="extract-utilities" Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.067076 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2e04acf-3c88-4eda-99ff-19ca10380e6d" containerName="registry-server" Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.069268 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-prnd8" Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.087996 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-prnd8"] Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.168154 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-catalog-content\") pod \"certified-operators-prnd8\" (UID: \"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa\") " pod="openshift-marketplace/certified-operators-prnd8" Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.168470 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4nph\" (UniqueName: \"kubernetes.io/projected/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-kube-api-access-w4nph\") pod \"certified-operators-prnd8\" (UID: \"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa\") " pod="openshift-marketplace/certified-operators-prnd8" Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.169041 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-utilities\") pod \"certified-operators-prnd8\" (UID: \"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa\") " pod="openshift-marketplace/certified-operators-prnd8" Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.271954 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-utilities\") pod \"certified-operators-prnd8\" (UID: \"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa\") " pod="openshift-marketplace/certified-operators-prnd8" Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.272039 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-catalog-content\") pod \"certified-operators-prnd8\" (UID: \"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa\") " pod="openshift-marketplace/certified-operators-prnd8" Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.272123 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4nph\" (UniqueName: \"kubernetes.io/projected/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-kube-api-access-w4nph\") pod \"certified-operators-prnd8\" (UID: \"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa\") " pod="openshift-marketplace/certified-operators-prnd8" Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.272535 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-utilities\") pod \"certified-operators-prnd8\" (UID: \"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa\") " pod="openshift-marketplace/certified-operators-prnd8" Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.272711 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-catalog-content\") pod \"certified-operators-prnd8\" (UID: \"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa\") " pod="openshift-marketplace/certified-operators-prnd8" Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.293033 4881 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-w4nph\" (UniqueName: \"kubernetes.io/projected/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-kube-api-access-w4nph\") pod \"certified-operators-prnd8\" (UID: \"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa\") " pod="openshift-marketplace/certified-operators-prnd8" Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.442559 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-prnd8" Dec 11 09:04:07 crc kubenswrapper[4881]: I1211 09:04:07.968598 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-prnd8"] Dec 11 09:04:08 crc kubenswrapper[4881]: I1211 09:04:08.268430 4881 generic.go:334] "Generic (PLEG): container finished" podID="4b4ebe53-8b8a-4bdd-847f-cf220f3789aa" containerID="273d5f6c654716c80afe55566d6bab93f187dd8f4f8ccaf628e832a8b61596f3" exitCode=0 Dec 11 09:04:08 crc kubenswrapper[4881]: I1211 09:04:08.268475 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-prnd8" event={"ID":"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa","Type":"ContainerDied","Data":"273d5f6c654716c80afe55566d6bab93f187dd8f4f8ccaf628e832a8b61596f3"} Dec 11 09:04:08 crc kubenswrapper[4881]: I1211 09:04:08.268500 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-prnd8" event={"ID":"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa","Type":"ContainerStarted","Data":"7c1f78da93da88d48968c3ffbfcbeb9691d616c0824f4ea01eced635ae84c0ba"} Dec 11 09:04:08 crc kubenswrapper[4881]: I1211 09:04:08.271684 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 09:04:09 crc kubenswrapper[4881]: I1211 09:04:09.282925 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-prnd8" event={"ID":"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa","Type":"ContainerStarted","Data":"bf50d77798f889de6086f569b3d516dd007b9654b3776958f626dac494a5c787"} Dec 11 09:04:10 crc kubenswrapper[4881]: I1211 09:04:10.296663 4881 generic.go:334] "Generic (PLEG): container finished" podID="4b4ebe53-8b8a-4bdd-847f-cf220f3789aa" containerID="bf50d77798f889de6086f569b3d516dd007b9654b3776958f626dac494a5c787" exitCode=0 Dec 11 09:04:10 crc kubenswrapper[4881]: I1211 09:04:10.296759 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-prnd8" event={"ID":"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa","Type":"ContainerDied","Data":"bf50d77798f889de6086f569b3d516dd007b9654b3776958f626dac494a5c787"} Dec 11 09:04:14 crc kubenswrapper[4881]: I1211 09:04:14.347271 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-prnd8" event={"ID":"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa","Type":"ContainerStarted","Data":"ad2d00c4e5c53e9186590b2cbd8f8598a2b2ab9642b16b939f876b31efd8585f"} Dec 11 09:04:14 crc kubenswrapper[4881]: I1211 09:04:14.416253 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-prnd8" podStartSLOduration=1.860253685 podStartE2EDuration="7.416232s" podCreationTimestamp="2025-12-11 09:04:07 +0000 UTC" firstStartedPulling="2025-12-11 09:04:08.271466221 +0000 UTC m=+2896.648834918" lastFinishedPulling="2025-12-11 09:04:13.827444546 +0000 UTC m=+2902.204813233" observedRunningTime="2025-12-11 09:04:14.403276923 +0000 UTC m=+2902.780645640" watchObservedRunningTime="2025-12-11 
09:04:14.416232 +0000 UTC m=+2902.793600697" Dec 11 09:04:17 crc kubenswrapper[4881]: I1211 09:04:17.443610 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-prnd8" Dec 11 09:04:17 crc kubenswrapper[4881]: I1211 09:04:17.444096 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-prnd8" Dec 11 09:04:17 crc kubenswrapper[4881]: I1211 09:04:17.494682 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-prnd8" Dec 11 09:04:27 crc kubenswrapper[4881]: I1211 09:04:27.495632 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-prnd8" Dec 11 09:04:27 crc kubenswrapper[4881]: I1211 09:04:27.548665 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-prnd8"] Dec 11 09:04:28 crc kubenswrapper[4881]: I1211 09:04:28.505446 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-prnd8" podUID="4b4ebe53-8b8a-4bdd-847f-cf220f3789aa" containerName="registry-server" containerID="cri-o://ad2d00c4e5c53e9186590b2cbd8f8598a2b2ab9642b16b939f876b31efd8585f" gracePeriod=2 Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.118214 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-prnd8" Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.243713 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-utilities\") pod \"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa\" (UID: \"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa\") " Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.243796 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-catalog-content\") pod \"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa\" (UID: \"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa\") " Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.243875 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4nph\" (UniqueName: \"kubernetes.io/projected/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-kube-api-access-w4nph\") pod \"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa\" (UID: \"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa\") " Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.245379 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-utilities" (OuterVolumeSpecName: "utilities") pod "4b4ebe53-8b8a-4bdd-847f-cf220f3789aa" (UID: "4b4ebe53-8b8a-4bdd-847f-cf220f3789aa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.257999 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-kube-api-access-w4nph" (OuterVolumeSpecName: "kube-api-access-w4nph") pod "4b4ebe53-8b8a-4bdd-847f-cf220f3789aa" (UID: "4b4ebe53-8b8a-4bdd-847f-cf220f3789aa"). InnerVolumeSpecName "kube-api-access-w4nph". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.335359 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4b4ebe53-8b8a-4bdd-847f-cf220f3789aa" (UID: "4b4ebe53-8b8a-4bdd-847f-cf220f3789aa"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.347530 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.347570 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.347586 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4nph\" (UniqueName: \"kubernetes.io/projected/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa-kube-api-access-w4nph\") on node \"crc\" DevicePath \"\"" Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.517387 4881 generic.go:334] "Generic (PLEG): container finished" podID="4b4ebe53-8b8a-4bdd-847f-cf220f3789aa" containerID="ad2d00c4e5c53e9186590b2cbd8f8598a2b2ab9642b16b939f876b31efd8585f" exitCode=0 Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.517430 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-prnd8" event={"ID":"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa","Type":"ContainerDied","Data":"ad2d00c4e5c53e9186590b2cbd8f8598a2b2ab9642b16b939f876b31efd8585f"} Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.517491 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-prnd8" event={"ID":"4b4ebe53-8b8a-4bdd-847f-cf220f3789aa","Type":"ContainerDied","Data":"7c1f78da93da88d48968c3ffbfcbeb9691d616c0824f4ea01eced635ae84c0ba"} Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.517513 4881 scope.go:117] "RemoveContainer" containerID="ad2d00c4e5c53e9186590b2cbd8f8598a2b2ab9642b16b939f876b31efd8585f" Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.517442 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-prnd8" Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.556590 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-prnd8"] Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.556618 4881 scope.go:117] "RemoveContainer" containerID="bf50d77798f889de6086f569b3d516dd007b9654b3776958f626dac494a5c787" Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.575788 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-prnd8"] Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.590813 4881 scope.go:117] "RemoveContainer" containerID="273d5f6c654716c80afe55566d6bab93f187dd8f4f8ccaf628e832a8b61596f3" Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.647071 4881 scope.go:117] "RemoveContainer" containerID="ad2d00c4e5c53e9186590b2cbd8f8598a2b2ab9642b16b939f876b31efd8585f" Dec 11 09:04:29 crc kubenswrapper[4881]: E1211 09:04:29.647882 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad2d00c4e5c53e9186590b2cbd8f8598a2b2ab9642b16b939f876b31efd8585f\": container with ID starting with ad2d00c4e5c53e9186590b2cbd8f8598a2b2ab9642b16b939f876b31efd8585f not found: ID does not exist" containerID="ad2d00c4e5c53e9186590b2cbd8f8598a2b2ab9642b16b939f876b31efd8585f" Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.647931 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad2d00c4e5c53e9186590b2cbd8f8598a2b2ab9642b16b939f876b31efd8585f"} err="failed to get container status \"ad2d00c4e5c53e9186590b2cbd8f8598a2b2ab9642b16b939f876b31efd8585f\": rpc error: code = NotFound desc = could not find container \"ad2d00c4e5c53e9186590b2cbd8f8598a2b2ab9642b16b939f876b31efd8585f\": container with ID starting with ad2d00c4e5c53e9186590b2cbd8f8598a2b2ab9642b16b939f876b31efd8585f not found: ID does not exist" Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.647962 4881 scope.go:117] "RemoveContainer" containerID="bf50d77798f889de6086f569b3d516dd007b9654b3776958f626dac494a5c787" Dec 11 09:04:29 crc kubenswrapper[4881]: E1211 09:04:29.648384 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf50d77798f889de6086f569b3d516dd007b9654b3776958f626dac494a5c787\": container with ID starting with bf50d77798f889de6086f569b3d516dd007b9654b3776958f626dac494a5c787 not found: ID does not exist" containerID="bf50d77798f889de6086f569b3d516dd007b9654b3776958f626dac494a5c787" Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.648415 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf50d77798f889de6086f569b3d516dd007b9654b3776958f626dac494a5c787"} err="failed to get container status \"bf50d77798f889de6086f569b3d516dd007b9654b3776958f626dac494a5c787\": rpc error: code = NotFound desc = could not find container \"bf50d77798f889de6086f569b3d516dd007b9654b3776958f626dac494a5c787\": container with ID starting with bf50d77798f889de6086f569b3d516dd007b9654b3776958f626dac494a5c787 not found: ID does not exist" Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.648439 4881 scope.go:117] "RemoveContainer" containerID="273d5f6c654716c80afe55566d6bab93f187dd8f4f8ccaf628e832a8b61596f3" Dec 11 09:04:29 crc kubenswrapper[4881]: E1211 09:04:29.648805 4881 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"273d5f6c654716c80afe55566d6bab93f187dd8f4f8ccaf628e832a8b61596f3\": container with ID starting with 273d5f6c654716c80afe55566d6bab93f187dd8f4f8ccaf628e832a8b61596f3 not found: ID does not exist" containerID="273d5f6c654716c80afe55566d6bab93f187dd8f4f8ccaf628e832a8b61596f3" Dec 11 09:04:29 crc kubenswrapper[4881]: I1211 09:04:29.648831 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"273d5f6c654716c80afe55566d6bab93f187dd8f4f8ccaf628e832a8b61596f3"} err="failed to get container status \"273d5f6c654716c80afe55566d6bab93f187dd8f4f8ccaf628e832a8b61596f3\": rpc error: code = NotFound desc = could not find container \"273d5f6c654716c80afe55566d6bab93f187dd8f4f8ccaf628e832a8b61596f3\": container with ID starting with 273d5f6c654716c80afe55566d6bab93f187dd8f4f8ccaf628e832a8b61596f3 not found: ID does not exist" Dec 11 09:04:31 crc kubenswrapper[4881]: I1211 09:04:31.018060 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b4ebe53-8b8a-4bdd-847f-cf220f3789aa" path="/var/lib/kubelet/pods/4b4ebe53-8b8a-4bdd-847f-cf220f3789aa/volumes" Dec 11 09:05:11 crc kubenswrapper[4881]: I1211 09:05:11.041494 4881 scope.go:117] "RemoveContainer" containerID="da9f885a2c287a5f47f076eb596949ddbb7c6254cefd3cf152e0f9695739f036" Dec 11 09:05:11 crc kubenswrapper[4881]: I1211 09:05:11.066553 4881 scope.go:117] "RemoveContainer" containerID="fe70cf65eafe0d763af0376ce12991d7b80ac96f64fa8c2c1b5d189ae46fc138" Dec 11 09:05:30 crc kubenswrapper[4881]: I1211 09:05:30.294681 4881 generic.go:334] "Generic (PLEG): container finished" podID="84b496b0-b36c-4ece-ba2d-e73423d502cd" containerID="8cde8be8bfd707f0d42581eb7394bc0dcc3d49a1f2337b8773d2fe24278bd720" exitCode=0 Dec 11 09:05:30 crc kubenswrapper[4881]: I1211 09:05:30.294760 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" event={"ID":"84b496b0-b36c-4ece-ba2d-e73423d502cd","Type":"ContainerDied","Data":"8cde8be8bfd707f0d42581eb7394bc0dcc3d49a1f2337b8773d2fe24278bd720"} Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.785191 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.899410 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-ssh-key\") pod \"84b496b0-b36c-4ece-ba2d-e73423d502cd\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.899453 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7f9v9\" (UniqueName: \"kubernetes.io/projected/84b496b0-b36c-4ece-ba2d-e73423d502cd-kube-api-access-7f9v9\") pod \"84b496b0-b36c-4ece-ba2d-e73423d502cd\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.899522 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-migration-ssh-key-0\") pod \"84b496b0-b36c-4ece-ba2d-e73423d502cd\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.899570 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-inventory\") pod \"84b496b0-b36c-4ece-ba2d-e73423d502cd\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.899588 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-migration-ssh-key-1\") pod \"84b496b0-b36c-4ece-ba2d-e73423d502cd\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.899652 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-cell1-compute-config-1\") pod \"84b496b0-b36c-4ece-ba2d-e73423d502cd\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.899712 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-cell1-compute-config-0\") pod \"84b496b0-b36c-4ece-ba2d-e73423d502cd\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.899780 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-extra-config-0\") pod \"84b496b0-b36c-4ece-ba2d-e73423d502cd\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.899846 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-combined-ca-bundle\") pod \"84b496b0-b36c-4ece-ba2d-e73423d502cd\" (UID: \"84b496b0-b36c-4ece-ba2d-e73423d502cd\") " Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.907122 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "84b496b0-b36c-4ece-ba2d-e73423d502cd" (UID: "84b496b0-b36c-4ece-ba2d-e73423d502cd"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.907647 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84b496b0-b36c-4ece-ba2d-e73423d502cd-kube-api-access-7f9v9" (OuterVolumeSpecName: "kube-api-access-7f9v9") pod "84b496b0-b36c-4ece-ba2d-e73423d502cd" (UID: "84b496b0-b36c-4ece-ba2d-e73423d502cd"). InnerVolumeSpecName "kube-api-access-7f9v9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.931407 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "84b496b0-b36c-4ece-ba2d-e73423d502cd" (UID: "84b496b0-b36c-4ece-ba2d-e73423d502cd"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.950522 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-inventory" (OuterVolumeSpecName: "inventory") pod "84b496b0-b36c-4ece-ba2d-e73423d502cd" (UID: "84b496b0-b36c-4ece-ba2d-e73423d502cd"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.951134 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "84b496b0-b36c-4ece-ba2d-e73423d502cd" (UID: "84b496b0-b36c-4ece-ba2d-e73423d502cd"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.953487 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "84b496b0-b36c-4ece-ba2d-e73423d502cd" (UID: "84b496b0-b36c-4ece-ba2d-e73423d502cd"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.956195 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "84b496b0-b36c-4ece-ba2d-e73423d502cd" (UID: "84b496b0-b36c-4ece-ba2d-e73423d502cd"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.958964 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "84b496b0-b36c-4ece-ba2d-e73423d502cd" (UID: "84b496b0-b36c-4ece-ba2d-e73423d502cd"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:05:31 crc kubenswrapper[4881]: I1211 09:05:31.975124 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "84b496b0-b36c-4ece-ba2d-e73423d502cd" (UID: "84b496b0-b36c-4ece-ba2d-e73423d502cd"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.003419 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.003451 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7f9v9\" (UniqueName: \"kubernetes.io/projected/84b496b0-b36c-4ece-ba2d-e73423d502cd-kube-api-access-7f9v9\") on node \"crc\" DevicePath \"\"" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.003464 4881 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.003474 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.003485 4881 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.003495 4881 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.003504 4881 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.003515 4881 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.003523 4881 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84b496b0-b36c-4ece-ba2d-e73423d502cd-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.322654 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" event={"ID":"84b496b0-b36c-4ece-ba2d-e73423d502cd","Type":"ContainerDied","Data":"845e7ff82816c408bfeddb7a497aa993f7a9cabd6e417cd72a0af955ab32e357"} Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.322959 4881 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="845e7ff82816c408bfeddb7a497aa993f7a9cabd6e417cd72a0af955ab32e357" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.322715 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-hq67p" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.430857 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct"] Dec 11 09:05:32 crc kubenswrapper[4881]: E1211 09:05:32.431434 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b4ebe53-8b8a-4bdd-847f-cf220f3789aa" containerName="extract-utilities" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.431463 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b4ebe53-8b8a-4bdd-847f-cf220f3789aa" containerName="extract-utilities" Dec 11 09:05:32 crc kubenswrapper[4881]: E1211 09:05:32.431490 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b4ebe53-8b8a-4bdd-847f-cf220f3789aa" containerName="extract-content" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.431504 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b4ebe53-8b8a-4bdd-847f-cf220f3789aa" containerName="extract-content" Dec 11 09:05:32 crc kubenswrapper[4881]: E1211 09:05:32.431692 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b4ebe53-8b8a-4bdd-847f-cf220f3789aa" containerName="registry-server" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.431700 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b4ebe53-8b8a-4bdd-847f-cf220f3789aa" containerName="registry-server" Dec 11 09:05:32 crc kubenswrapper[4881]: E1211 09:05:32.431729 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84b496b0-b36c-4ece-ba2d-e73423d502cd" containerName="nova-edpm-deployment-openstack-edpm-ipam" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.431737 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="84b496b0-b36c-4ece-ba2d-e73423d502cd" containerName="nova-edpm-deployment-openstack-edpm-ipam" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.432026 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="84b496b0-b36c-4ece-ba2d-e73423d502cd" containerName="nova-edpm-deployment-openstack-edpm-ipam" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.432048 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b4ebe53-8b8a-4bdd-847f-cf220f3789aa" containerName="registry-server" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.433040 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.437478 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.437834 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.437963 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.438901 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.438981 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.459736 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct"] Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.515632 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.515916 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.516052 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.516220 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sw7v7\" (UniqueName: \"kubernetes.io/projected/e87175a4-03cc-472f-90ac-18cb8573131f-kube-api-access-sw7v7\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.516406 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 
09:05:32.516478 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.516717 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: E1211 09:05:32.612761 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod84b496b0_b36c_4ece_ba2d_e73423d502cd.slice\": RecentStats: unable to find data in memory cache]" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.618622 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sw7v7\" (UniqueName: \"kubernetes.io/projected/e87175a4-03cc-472f-90ac-18cb8573131f-kube-api-access-sw7v7\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.618686 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.618721 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.618791 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.618908 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc 
kubenswrapper[4881]: I1211 09:05:32.618927 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.618973 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.623612 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.623653 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.624889 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.626598 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.627458 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.633092 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc 
kubenswrapper[4881]: I1211 09:05:32.644267 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sw7v7\" (UniqueName: \"kubernetes.io/projected/e87175a4-03cc-472f-90ac-18cb8573131f-kube-api-access-sw7v7\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-plpct\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:32 crc kubenswrapper[4881]: I1211 09:05:32.750760 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:05:33 crc kubenswrapper[4881]: I1211 09:05:33.387598 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct"] Dec 11 09:05:33 crc kubenswrapper[4881]: W1211 09:05:33.390187 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode87175a4_03cc_472f_90ac_18cb8573131f.slice/crio-d4d29f4114e1d64ffa47961f10878b5e84e481fd07faf242d4e1ab246657cb3d WatchSource:0}: Error finding container d4d29f4114e1d64ffa47961f10878b5e84e481fd07faf242d4e1ab246657cb3d: Status 404 returned error can't find the container with id d4d29f4114e1d64ffa47961f10878b5e84e481fd07faf242d4e1ab246657cb3d Dec 11 09:05:34 crc kubenswrapper[4881]: I1211 09:05:34.391141 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" event={"ID":"e87175a4-03cc-472f-90ac-18cb8573131f","Type":"ContainerStarted","Data":"d4d29f4114e1d64ffa47961f10878b5e84e481fd07faf242d4e1ab246657cb3d"} Dec 11 09:05:36 crc kubenswrapper[4881]: I1211 09:05:36.417220 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" event={"ID":"e87175a4-03cc-472f-90ac-18cb8573131f","Type":"ContainerStarted","Data":"52f8401d51c16d6a9112c7c4bc8fc6d0856467bd699293c7c91b42cf9b6c2e3e"} Dec 11 09:05:36 crc kubenswrapper[4881]: I1211 09:05:36.441765 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" podStartSLOduration=1.905004173 podStartE2EDuration="4.441743313s" podCreationTimestamp="2025-12-11 09:05:32 +0000 UTC" firstStartedPulling="2025-12-11 09:05:33.393574341 +0000 UTC m=+2981.770943038" lastFinishedPulling="2025-12-11 09:05:35.930313471 +0000 UTC m=+2984.307682178" observedRunningTime="2025-12-11 09:05:36.434191458 +0000 UTC m=+2984.811560155" watchObservedRunningTime="2025-12-11 09:05:36.441743313 +0000 UTC m=+2984.819112010" Dec 11 09:05:59 crc kubenswrapper[4881]: I1211 09:05:59.397536 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:05:59 crc kubenswrapper[4881]: I1211 09:05:59.399102 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:06:11 crc kubenswrapper[4881]: I1211 09:06:11.169176 4881 scope.go:117] "RemoveContainer" 
containerID="78a668d7e8ff891cee651b43f25a80324f7193b1882555d470868eebd4805055" Dec 11 09:06:29 crc kubenswrapper[4881]: I1211 09:06:29.396926 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:06:29 crc kubenswrapper[4881]: I1211 09:06:29.397502 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:06:59 crc kubenswrapper[4881]: I1211 09:06:59.397103 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:06:59 crc kubenswrapper[4881]: I1211 09:06:59.398150 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:06:59 crc kubenswrapper[4881]: I1211 09:06:59.398212 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 09:06:59 crc kubenswrapper[4881]: I1211 09:06:59.399680 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 09:06:59 crc kubenswrapper[4881]: I1211 09:06:59.399757 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" gracePeriod=600 Dec 11 09:07:00 crc kubenswrapper[4881]: E1211 09:07:00.113778 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:07:00 crc kubenswrapper[4881]: I1211 09:07:00.410450 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" exitCode=0 Dec 11 09:07:00 crc kubenswrapper[4881]: I1211 09:07:00.410530 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" 
event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1"} Dec 11 09:07:00 crc kubenswrapper[4881]: I1211 09:07:00.410878 4881 scope.go:117] "RemoveContainer" containerID="e510f860249c2369bd34a09d0c999610be3cd247e6b9eafae5e3404be20264de" Dec 11 09:07:00 crc kubenswrapper[4881]: I1211 09:07:00.411776 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:07:00 crc kubenswrapper[4881]: E1211 09:07:00.412113 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:07:15 crc kubenswrapper[4881]: I1211 09:07:15.007283 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:07:15 crc kubenswrapper[4881]: E1211 09:07:15.008563 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:07:27 crc kubenswrapper[4881]: I1211 09:07:27.005389 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:07:27 crc kubenswrapper[4881]: E1211 09:07:27.006213 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:07:40 crc kubenswrapper[4881]: I1211 09:07:40.005708 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:07:40 crc kubenswrapper[4881]: E1211 09:07:40.006618 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:07:52 crc kubenswrapper[4881]: I1211 09:07:52.005661 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:07:52 crc kubenswrapper[4881]: E1211 09:07:52.006526 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:07:58 crc kubenswrapper[4881]: I1211 09:07:58.083619 4881 generic.go:334] "Generic (PLEG): container finished" podID="e87175a4-03cc-472f-90ac-18cb8573131f" containerID="52f8401d51c16d6a9112c7c4bc8fc6d0856467bd699293c7c91b42cf9b6c2e3e" exitCode=0 Dec 11 09:07:58 crc kubenswrapper[4881]: I1211 09:07:58.083703 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" event={"ID":"e87175a4-03cc-472f-90ac-18cb8573131f","Type":"ContainerDied","Data":"52f8401d51c16d6a9112c7c4bc8fc6d0856467bd699293c7c91b42cf9b6c2e3e"} Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.673683 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.737309 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-2\") pod \"e87175a4-03cc-472f-90ac-18cb8573131f\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.737402 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ssh-key\") pod \"e87175a4-03cc-472f-90ac-18cb8573131f\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.737453 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-1\") pod \"e87175a4-03cc-472f-90ac-18cb8573131f\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.737518 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-0\") pod \"e87175a4-03cc-472f-90ac-18cb8573131f\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.737637 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sw7v7\" (UniqueName: \"kubernetes.io/projected/e87175a4-03cc-472f-90ac-18cb8573131f-kube-api-access-sw7v7\") pod \"e87175a4-03cc-472f-90ac-18cb8573131f\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.737702 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-inventory\") pod \"e87175a4-03cc-472f-90ac-18cb8573131f\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.737779 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-telemetry-combined-ca-bundle\") pod 
\"e87175a4-03cc-472f-90ac-18cb8573131f\" (UID: \"e87175a4-03cc-472f-90ac-18cb8573131f\") " Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.745000 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e87175a4-03cc-472f-90ac-18cb8573131f-kube-api-access-sw7v7" (OuterVolumeSpecName: "kube-api-access-sw7v7") pod "e87175a4-03cc-472f-90ac-18cb8573131f" (UID: "e87175a4-03cc-472f-90ac-18cb8573131f"). InnerVolumeSpecName "kube-api-access-sw7v7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.753959 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "e87175a4-03cc-472f-90ac-18cb8573131f" (UID: "e87175a4-03cc-472f-90ac-18cb8573131f"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.772392 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e87175a4-03cc-472f-90ac-18cb8573131f" (UID: "e87175a4-03cc-472f-90ac-18cb8573131f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.775509 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "e87175a4-03cc-472f-90ac-18cb8573131f" (UID: "e87175a4-03cc-472f-90ac-18cb8573131f"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.775639 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "e87175a4-03cc-472f-90ac-18cb8573131f" (UID: "e87175a4-03cc-472f-90ac-18cb8573131f"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.782671 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-inventory" (OuterVolumeSpecName: "inventory") pod "e87175a4-03cc-472f-90ac-18cb8573131f" (UID: "e87175a4-03cc-472f-90ac-18cb8573131f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.782888 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "e87175a4-03cc-472f-90ac-18cb8573131f" (UID: "e87175a4-03cc-472f-90ac-18cb8573131f"). InnerVolumeSpecName "ceilometer-compute-config-data-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.841690 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sw7v7\" (UniqueName: \"kubernetes.io/projected/e87175a4-03cc-472f-90ac-18cb8573131f-kube-api-access-sw7v7\") on node \"crc\" DevicePath \"\"" Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.841766 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.841782 4881 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.841795 4881 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.841809 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.841852 4881 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Dec 11 09:07:59 crc kubenswrapper[4881]: I1211 09:07:59.841865 4881 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e87175a4-03cc-472f-90ac-18cb8573131f-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.107101 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" event={"ID":"e87175a4-03cc-472f-90ac-18cb8573131f","Type":"ContainerDied","Data":"d4d29f4114e1d64ffa47961f10878b5e84e481fd07faf242d4e1ab246657cb3d"} Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.107467 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d4d29f4114e1d64ffa47961f10878b5e84e481fd07faf242d4e1ab246657cb3d" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.107189 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-plpct" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.212509 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s"] Dec 11 09:08:00 crc kubenswrapper[4881]: E1211 09:08:00.213183 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e87175a4-03cc-472f-90ac-18cb8573131f" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.213209 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e87175a4-03cc-472f-90ac-18cb8573131f" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.213483 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="e87175a4-03cc-472f-90ac-18cb8573131f" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.214427 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.219578 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.219680 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.219814 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-ipmi-config-data" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.219942 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.219942 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.227072 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s"] Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.352387 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdjl6\" (UniqueName: \"kubernetes.io/projected/1272adc3-399f-4c39-b62c-3bc18dda3b59-kube-api-access-pdjl6\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.352453 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.352487 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ssh-key\") pod 
\"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.354064 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.354124 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.354151 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.354216 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.456286 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.456426 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.456469 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-1\") pod 
\"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.456527 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.456703 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdjl6\" (UniqueName: \"kubernetes.io/projected/1272adc3-399f-4c39-b62c-3bc18dda3b59-kube-api-access-pdjl6\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.456764 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.456817 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.461287 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.461384 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.467280 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 
09:08:00.472696 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.472799 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.473058 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.476613 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdjl6\" (UniqueName: \"kubernetes.io/projected/1272adc3-399f-4c39-b62c-3bc18dda3b59-kube-api-access-pdjl6\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:00 crc kubenswrapper[4881]: I1211 09:08:00.533050 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:08:01 crc kubenswrapper[4881]: I1211 09:08:01.114176 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s"] Dec 11 09:08:02 crc kubenswrapper[4881]: I1211 09:08:02.130359 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" event={"ID":"1272adc3-399f-4c39-b62c-3bc18dda3b59","Type":"ContainerStarted","Data":"c9b94cc2db1ef3b33646f6492141cabb1cd62b03df683ffc84fd7c94025f35ad"} Dec 11 09:08:03 crc kubenswrapper[4881]: I1211 09:08:03.015663 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:08:03 crc kubenswrapper[4881]: E1211 09:08:03.016619 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:08:04 crc kubenswrapper[4881]: I1211 09:08:04.162430 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" event={"ID":"1272adc3-399f-4c39-b62c-3bc18dda3b59","Type":"ContainerStarted","Data":"c8f0aa71dd198cd8e82d5c5b195882ddfd2f48329142bcdf549734cbc505ef17"} Dec 11 09:08:04 crc kubenswrapper[4881]: I1211 09:08:04.192707 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" podStartSLOduration=2.412455844 podStartE2EDuration="4.192688789s" podCreationTimestamp="2025-12-11 09:08:00 +0000 UTC" firstStartedPulling="2025-12-11 09:08:01.119740164 +0000 UTC m=+3129.497108861" lastFinishedPulling="2025-12-11 09:08:02.899973109 +0000 UTC m=+3131.277341806" observedRunningTime="2025-12-11 09:08:04.183286647 +0000 UTC m=+3132.560655344" watchObservedRunningTime="2025-12-11 09:08:04.192688789 +0000 UTC m=+3132.570057486" Dec 11 09:08:17 crc kubenswrapper[4881]: I1211 09:08:17.005758 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:08:17 crc kubenswrapper[4881]: E1211 09:08:17.008126 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:08:30 crc kubenswrapper[4881]: I1211 09:08:30.006464 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:08:30 crc kubenswrapper[4881]: E1211 09:08:30.007327 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:08:42 crc kubenswrapper[4881]: I1211 09:08:42.007073 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:08:42 crc kubenswrapper[4881]: E1211 09:08:42.007900 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:08:54 crc kubenswrapper[4881]: I1211 09:08:54.006459 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:08:54 crc kubenswrapper[4881]: E1211 09:08:54.007407 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:09:10 crc kubenswrapper[4881]: I1211 09:09:10.006031 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:09:10 crc kubenswrapper[4881]: E1211 09:09:10.007159 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:09:21 crc kubenswrapper[4881]: I1211 09:09:21.005358 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:09:21 crc kubenswrapper[4881]: E1211 09:09:21.006363 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:09:22 crc kubenswrapper[4881]: I1211 09:09:22.230940 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2cg92"] Dec 11 09:09:22 crc kubenswrapper[4881]: I1211 09:09:22.235959 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2cg92" Dec 11 09:09:22 crc kubenswrapper[4881]: I1211 09:09:22.249315 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2cg92"] Dec 11 09:09:22 crc kubenswrapper[4881]: I1211 09:09:22.429796 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58zqz\" (UniqueName: \"kubernetes.io/projected/b6c32baa-3cbd-45e0-9806-422ed585fbb1-kube-api-access-58zqz\") pod \"community-operators-2cg92\" (UID: \"b6c32baa-3cbd-45e0-9806-422ed585fbb1\") " pod="openshift-marketplace/community-operators-2cg92" Dec 11 09:09:22 crc kubenswrapper[4881]: I1211 09:09:22.429980 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6c32baa-3cbd-45e0-9806-422ed585fbb1-catalog-content\") pod \"community-operators-2cg92\" (UID: \"b6c32baa-3cbd-45e0-9806-422ed585fbb1\") " pod="openshift-marketplace/community-operators-2cg92" Dec 11 09:09:22 crc kubenswrapper[4881]: I1211 09:09:22.430138 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6c32baa-3cbd-45e0-9806-422ed585fbb1-utilities\") pod \"community-operators-2cg92\" (UID: \"b6c32baa-3cbd-45e0-9806-422ed585fbb1\") " pod="openshift-marketplace/community-operators-2cg92" Dec 11 09:09:22 crc kubenswrapper[4881]: I1211 09:09:22.532951 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6c32baa-3cbd-45e0-9806-422ed585fbb1-utilities\") pod \"community-operators-2cg92\" (UID: \"b6c32baa-3cbd-45e0-9806-422ed585fbb1\") " pod="openshift-marketplace/community-operators-2cg92" Dec 11 09:09:22 crc kubenswrapper[4881]: I1211 09:09:22.533046 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6c32baa-3cbd-45e0-9806-422ed585fbb1-utilities\") pod \"community-operators-2cg92\" (UID: \"b6c32baa-3cbd-45e0-9806-422ed585fbb1\") " pod="openshift-marketplace/community-operators-2cg92" Dec 11 09:09:22 crc kubenswrapper[4881]: I1211 09:09:22.533457 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58zqz\" (UniqueName: \"kubernetes.io/projected/b6c32baa-3cbd-45e0-9806-422ed585fbb1-kube-api-access-58zqz\") pod \"community-operators-2cg92\" (UID: \"b6c32baa-3cbd-45e0-9806-422ed585fbb1\") " pod="openshift-marketplace/community-operators-2cg92" Dec 11 09:09:22 crc kubenswrapper[4881]: I1211 09:09:22.533679 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6c32baa-3cbd-45e0-9806-422ed585fbb1-catalog-content\") pod \"community-operators-2cg92\" (UID: \"b6c32baa-3cbd-45e0-9806-422ed585fbb1\") " pod="openshift-marketplace/community-operators-2cg92" Dec 11 09:09:22 crc kubenswrapper[4881]: I1211 09:09:22.534281 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6c32baa-3cbd-45e0-9806-422ed585fbb1-catalog-content\") pod \"community-operators-2cg92\" (UID: \"b6c32baa-3cbd-45e0-9806-422ed585fbb1\") " pod="openshift-marketplace/community-operators-2cg92" Dec 11 09:09:22 crc kubenswrapper[4881]: I1211 09:09:22.563926 4881 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-58zqz\" (UniqueName: \"kubernetes.io/projected/b6c32baa-3cbd-45e0-9806-422ed585fbb1-kube-api-access-58zqz\") pod \"community-operators-2cg92\" (UID: \"b6c32baa-3cbd-45e0-9806-422ed585fbb1\") " pod="openshift-marketplace/community-operators-2cg92" Dec 11 09:09:22 crc kubenswrapper[4881]: I1211 09:09:22.570881 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2cg92" Dec 11 09:09:23 crc kubenswrapper[4881]: I1211 09:09:23.198736 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2cg92"] Dec 11 09:09:24 crc kubenswrapper[4881]: I1211 09:09:24.092125 4881 generic.go:334] "Generic (PLEG): container finished" podID="b6c32baa-3cbd-45e0-9806-422ed585fbb1" containerID="24238b78ae8a03ba9d908a6ac2eeeb37d213d8ac305f9b1bade544825ac920d1" exitCode=0 Dec 11 09:09:24 crc kubenswrapper[4881]: I1211 09:09:24.092549 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2cg92" event={"ID":"b6c32baa-3cbd-45e0-9806-422ed585fbb1","Type":"ContainerDied","Data":"24238b78ae8a03ba9d908a6ac2eeeb37d213d8ac305f9b1bade544825ac920d1"} Dec 11 09:09:24 crc kubenswrapper[4881]: I1211 09:09:24.092578 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2cg92" event={"ID":"b6c32baa-3cbd-45e0-9806-422ed585fbb1","Type":"ContainerStarted","Data":"9747f52c85568059a5a55515035968434028b98067826d70c3c4f4f727d43119"} Dec 11 09:09:24 crc kubenswrapper[4881]: I1211 09:09:24.095497 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 09:09:26 crc kubenswrapper[4881]: I1211 09:09:26.116473 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2cg92" event={"ID":"b6c32baa-3cbd-45e0-9806-422ed585fbb1","Type":"ContainerStarted","Data":"78ebf124e22d0e42c4d38788b0e351fa4a0decea10d3518b5b13c80575d5f519"} Dec 11 09:09:27 crc kubenswrapper[4881]: I1211 09:09:27.132966 4881 generic.go:334] "Generic (PLEG): container finished" podID="b6c32baa-3cbd-45e0-9806-422ed585fbb1" containerID="78ebf124e22d0e42c4d38788b0e351fa4a0decea10d3518b5b13c80575d5f519" exitCode=0 Dec 11 09:09:27 crc kubenswrapper[4881]: I1211 09:09:27.133040 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2cg92" event={"ID":"b6c32baa-3cbd-45e0-9806-422ed585fbb1","Type":"ContainerDied","Data":"78ebf124e22d0e42c4d38788b0e351fa4a0decea10d3518b5b13c80575d5f519"} Dec 11 09:09:28 crc kubenswrapper[4881]: I1211 09:09:28.145977 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2cg92" event={"ID":"b6c32baa-3cbd-45e0-9806-422ed585fbb1","Type":"ContainerStarted","Data":"53db5558de3f1dedb50dce15e1f42deeaa836eecd441e0504816794291de60c2"} Dec 11 09:09:28 crc kubenswrapper[4881]: I1211 09:09:28.186850 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2cg92" podStartSLOduration=2.505383843 podStartE2EDuration="6.186820617s" podCreationTimestamp="2025-12-11 09:09:22 +0000 UTC" firstStartedPulling="2025-12-11 09:09:24.095177709 +0000 UTC m=+3212.472546406" lastFinishedPulling="2025-12-11 09:09:27.776614483 +0000 UTC m=+3216.153983180" observedRunningTime="2025-12-11 09:09:28.164474748 +0000 UTC m=+3216.541843445" watchObservedRunningTime="2025-12-11 
09:09:28.186820617 +0000 UTC m=+3216.564189314"
Dec 11 09:09:32 crc kubenswrapper[4881]: I1211 09:09:32.571713 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2cg92"
Dec 11 09:09:32 crc kubenswrapper[4881]: I1211 09:09:32.572582 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2cg92"
Dec 11 09:09:32 crc kubenswrapper[4881]: I1211 09:09:32.629795 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2cg92"
Dec 11 09:09:33 crc kubenswrapper[4881]: I1211 09:09:33.350424 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2cg92"
Dec 11 09:09:33 crc kubenswrapper[4881]: I1211 09:09:33.410770 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2cg92"]
Dec 11 09:09:35 crc kubenswrapper[4881]: I1211 09:09:35.223728 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2cg92" podUID="b6c32baa-3cbd-45e0-9806-422ed585fbb1" containerName="registry-server" containerID="cri-o://53db5558de3f1dedb50dce15e1f42deeaa836eecd441e0504816794291de60c2" gracePeriod=2
Dec 11 09:09:36 crc kubenswrapper[4881]: I1211 09:09:36.006147 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1"
Dec 11 09:09:36 crc kubenswrapper[4881]: E1211 09:09:36.006886 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:09:36 crc kubenswrapper[4881]: I1211 09:09:36.239717 4881 generic.go:334] "Generic (PLEG): container finished" podID="b6c32baa-3cbd-45e0-9806-422ed585fbb1" containerID="53db5558de3f1dedb50dce15e1f42deeaa836eecd441e0504816794291de60c2" exitCode=0
Dec 11 09:09:36 crc kubenswrapper[4881]: I1211 09:09:36.239748 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2cg92" event={"ID":"b6c32baa-3cbd-45e0-9806-422ed585fbb1","Type":"ContainerDied","Data":"53db5558de3f1dedb50dce15e1f42deeaa836eecd441e0504816794291de60c2"}
Dec 11 09:09:36 crc kubenswrapper[4881]: I1211 09:09:36.239802 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2cg92" event={"ID":"b6c32baa-3cbd-45e0-9806-422ed585fbb1","Type":"ContainerDied","Data":"9747f52c85568059a5a55515035968434028b98067826d70c3c4f4f727d43119"}
Dec 11 09:09:36 crc kubenswrapper[4881]: I1211 09:09:36.239820 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9747f52c85568059a5a55515035968434028b98067826d70c3c4f4f727d43119"
Dec 11 09:09:36 crc kubenswrapper[4881]: I1211 09:09:36.329269 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2cg92"
Dec 11 09:09:36 crc kubenswrapper[4881]: I1211 09:09:36.500645 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6c32baa-3cbd-45e0-9806-422ed585fbb1-utilities\") pod \"b6c32baa-3cbd-45e0-9806-422ed585fbb1\" (UID: \"b6c32baa-3cbd-45e0-9806-422ed585fbb1\") "
Dec 11 09:09:36 crc kubenswrapper[4881]: I1211 09:09:36.500855 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6c32baa-3cbd-45e0-9806-422ed585fbb1-catalog-content\") pod \"b6c32baa-3cbd-45e0-9806-422ed585fbb1\" (UID: \"b6c32baa-3cbd-45e0-9806-422ed585fbb1\") "
Dec 11 09:09:36 crc kubenswrapper[4881]: I1211 09:09:36.501093 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58zqz\" (UniqueName: \"kubernetes.io/projected/b6c32baa-3cbd-45e0-9806-422ed585fbb1-kube-api-access-58zqz\") pod \"b6c32baa-3cbd-45e0-9806-422ed585fbb1\" (UID: \"b6c32baa-3cbd-45e0-9806-422ed585fbb1\") "
Dec 11 09:09:36 crc kubenswrapper[4881]: I1211 09:09:36.501756 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6c32baa-3cbd-45e0-9806-422ed585fbb1-utilities" (OuterVolumeSpecName: "utilities") pod "b6c32baa-3cbd-45e0-9806-422ed585fbb1" (UID: "b6c32baa-3cbd-45e0-9806-422ed585fbb1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 09:09:36 crc kubenswrapper[4881]: I1211 09:09:36.510373 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6c32baa-3cbd-45e0-9806-422ed585fbb1-kube-api-access-58zqz" (OuterVolumeSpecName: "kube-api-access-58zqz") pod "b6c32baa-3cbd-45e0-9806-422ed585fbb1" (UID: "b6c32baa-3cbd-45e0-9806-422ed585fbb1"). InnerVolumeSpecName "kube-api-access-58zqz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 09:09:36 crc kubenswrapper[4881]: I1211 09:09:36.565211 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6c32baa-3cbd-45e0-9806-422ed585fbb1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b6c32baa-3cbd-45e0-9806-422ed585fbb1" (UID: "b6c32baa-3cbd-45e0-9806-422ed585fbb1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 09:09:36 crc kubenswrapper[4881]: I1211 09:09:36.603981 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6c32baa-3cbd-45e0-9806-422ed585fbb1-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 09:09:36 crc kubenswrapper[4881]: I1211 09:09:36.604020 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58zqz\" (UniqueName: \"kubernetes.io/projected/b6c32baa-3cbd-45e0-9806-422ed585fbb1-kube-api-access-58zqz\") on node \"crc\" DevicePath \"\""
Dec 11 09:09:36 crc kubenswrapper[4881]: I1211 09:09:36.604036 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6c32baa-3cbd-45e0-9806-422ed585fbb1-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 09:09:37 crc kubenswrapper[4881]: I1211 09:09:37.254497 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2cg92"
Dec 11 09:09:37 crc kubenswrapper[4881]: I1211 09:09:37.286269 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2cg92"]
Dec 11 09:09:37 crc kubenswrapper[4881]: I1211 09:09:37.298823 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2cg92"]
Dec 11 09:09:39 crc kubenswrapper[4881]: I1211 09:09:39.024449 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6c32baa-3cbd-45e0-9806-422ed585fbb1" path="/var/lib/kubelet/pods/b6c32baa-3cbd-45e0-9806-422ed585fbb1/volumes"
Dec 11 09:09:47 crc kubenswrapper[4881]: I1211 09:09:47.006028 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1"
Dec 11 09:09:47 crc kubenswrapper[4881]: E1211 09:09:47.006900 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:09:58 crc kubenswrapper[4881]: I1211 09:09:58.006648 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1"
Dec 11 09:09:58 crc kubenswrapper[4881]: E1211 09:09:58.008064 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:09:59 crc kubenswrapper[4881]: I1211 09:09:59.522698 4881 generic.go:334] "Generic (PLEG): container finished" podID="1272adc3-399f-4c39-b62c-3bc18dda3b59" containerID="c8f0aa71dd198cd8e82d5c5b195882ddfd2f48329142bcdf549734cbc505ef17" exitCode=0
Dec 11 09:09:59 crc kubenswrapper[4881]: I1211 09:09:59.522812 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" event={"ID":"1272adc3-399f-4c39-b62c-3bc18dda3b59","Type":"ContainerDied","Data":"c8f0aa71dd198cd8e82d5c5b195882ddfd2f48329142bcdf549734cbc505ef17"}
Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.145206 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s"
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.265088 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pdjl6\" (UniqueName: \"kubernetes.io/projected/1272adc3-399f-4c39-b62c-3bc18dda3b59-kube-api-access-pdjl6\") pod \"1272adc3-399f-4c39-b62c-3bc18dda3b59\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.265185 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-0\") pod \"1272adc3-399f-4c39-b62c-3bc18dda3b59\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.265351 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ssh-key\") pod \"1272adc3-399f-4c39-b62c-3bc18dda3b59\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.265400 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-inventory\") pod \"1272adc3-399f-4c39-b62c-3bc18dda3b59\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.265501 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-2\") pod \"1272adc3-399f-4c39-b62c-3bc18dda3b59\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.265598 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-telemetry-power-monitoring-combined-ca-bundle\") pod \"1272adc3-399f-4c39-b62c-3bc18dda3b59\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.265643 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-1\") pod \"1272adc3-399f-4c39-b62c-3bc18dda3b59\" (UID: \"1272adc3-399f-4c39-b62c-3bc18dda3b59\") " Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.278737 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1272adc3-399f-4c39-b62c-3bc18dda3b59-kube-api-access-pdjl6" (OuterVolumeSpecName: "kube-api-access-pdjl6") pod "1272adc3-399f-4c39-b62c-3bc18dda3b59" (UID: "1272adc3-399f-4c39-b62c-3bc18dda3b59"). InnerVolumeSpecName "kube-api-access-pdjl6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.281536 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "1272adc3-399f-4c39-b62c-3bc18dda3b59" (UID: "1272adc3-399f-4c39-b62c-3bc18dda3b59"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.300445 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1272adc3-399f-4c39-b62c-3bc18dda3b59" (UID: "1272adc3-399f-4c39-b62c-3bc18dda3b59"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.309470 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-0" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-0") pod "1272adc3-399f-4c39-b62c-3bc18dda3b59" (UID: "1272adc3-399f-4c39-b62c-3bc18dda3b59"). InnerVolumeSpecName "ceilometer-ipmi-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.311024 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-1" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-1") pod "1272adc3-399f-4c39-b62c-3bc18dda3b59" (UID: "1272adc3-399f-4c39-b62c-3bc18dda3b59"). InnerVolumeSpecName "ceilometer-ipmi-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.319181 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-2" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-2") pod "1272adc3-399f-4c39-b62c-3bc18dda3b59" (UID: "1272adc3-399f-4c39-b62c-3bc18dda3b59"). InnerVolumeSpecName "ceilometer-ipmi-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.324252 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-inventory" (OuterVolumeSpecName: "inventory") pod "1272adc3-399f-4c39-b62c-3bc18dda3b59" (UID: "1272adc3-399f-4c39-b62c-3bc18dda3b59"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.369258 4881 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.369298 4881 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-1\") on node \"crc\" DevicePath \"\"" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.369313 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pdjl6\" (UniqueName: \"kubernetes.io/projected/1272adc3-399f-4c39-b62c-3bc18dda3b59-kube-api-access-pdjl6\") on node \"crc\" DevicePath \"\"" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.369325 4881 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-0\") on node \"crc\" DevicePath \"\"" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.369349 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.369361 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.369371 4881 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/1272adc3-399f-4c39-b62c-3bc18dda3b59-ceilometer-ipmi-config-data-2\") on node \"crc\" DevicePath \"\"" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.544188 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" event={"ID":"1272adc3-399f-4c39-b62c-3bc18dda3b59","Type":"ContainerDied","Data":"c9b94cc2db1ef3b33646f6492141cabb1cd62b03df683ffc84fd7c94025f35ad"} Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.544229 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9b94cc2db1ef3b33646f6492141cabb1cd62b03df683ffc84fd7c94025f35ad" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.544544 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.658508 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk"] Dec 11 09:10:01 crc kubenswrapper[4881]: E1211 09:10:01.659306 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6c32baa-3cbd-45e0-9806-422ed585fbb1" containerName="registry-server" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.659325 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6c32baa-3cbd-45e0-9806-422ed585fbb1" containerName="registry-server" Dec 11 09:10:01 crc kubenswrapper[4881]: E1211 09:10:01.659356 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1272adc3-399f-4c39-b62c-3bc18dda3b59" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.659366 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="1272adc3-399f-4c39-b62c-3bc18dda3b59" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Dec 11 09:10:01 crc kubenswrapper[4881]: E1211 09:10:01.659376 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6c32baa-3cbd-45e0-9806-422ed585fbb1" containerName="extract-utilities" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.659383 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6c32baa-3cbd-45e0-9806-422ed585fbb1" containerName="extract-utilities" Dec 11 09:10:01 crc kubenswrapper[4881]: E1211 09:10:01.659412 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6c32baa-3cbd-45e0-9806-422ed585fbb1" containerName="extract-content" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.659417 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6c32baa-3cbd-45e0-9806-422ed585fbb1" containerName="extract-content" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.659645 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6c32baa-3cbd-45e0-9806-422ed585fbb1" containerName="registry-server" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.659660 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="1272adc3-399f-4c39-b62c-3bc18dda3b59" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.660513 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.662922 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.663379 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.663499 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bnm72" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.663632 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"logging-compute-config-data" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.663959 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.671520 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk"] Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.780358 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-wqjnk\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.780642 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8d5vv\" (UniqueName: \"kubernetes.io/projected/7afa083c-c63d-4f07-9a8f-15b00a918860-kube-api-access-8d5vv\") pod \"logging-edpm-deployment-openstack-edpm-ipam-wqjnk\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.780747 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-wqjnk\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.781184 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-wqjnk\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.781358 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-wqjnk\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.883734 4881 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-wqjnk\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.883907 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-wqjnk\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.883974 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8d5vv\" (UniqueName: \"kubernetes.io/projected/7afa083c-c63d-4f07-9a8f-15b00a918860-kube-api-access-8d5vv\") pod \"logging-edpm-deployment-openstack-edpm-ipam-wqjnk\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.884020 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-wqjnk\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.884104 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-wqjnk\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.891201 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-wqjnk\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.897862 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-wqjnk\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.899311 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-wqjnk\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.899744 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" 
(UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-wqjnk\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:01 crc kubenswrapper[4881]: I1211 09:10:01.904862 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8d5vv\" (UniqueName: \"kubernetes.io/projected/7afa083c-c63d-4f07-9a8f-15b00a918860-kube-api-access-8d5vv\") pod \"logging-edpm-deployment-openstack-edpm-ipam-wqjnk\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:02 crc kubenswrapper[4881]: I1211 09:10:02.055407 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:02 crc kubenswrapper[4881]: I1211 09:10:02.615572 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk"] Dec 11 09:10:03 crc kubenswrapper[4881]: I1211 09:10:03.574537 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" event={"ID":"7afa083c-c63d-4f07-9a8f-15b00a918860","Type":"ContainerStarted","Data":"b5438f331cfb37b98cea6175031ff7ec3172e50a64157981e3d09ca1f8dd9534"} Dec 11 09:10:04 crc kubenswrapper[4881]: I1211 09:10:04.584825 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" event={"ID":"7afa083c-c63d-4f07-9a8f-15b00a918860","Type":"ContainerStarted","Data":"d0ec4ed254492c21ad1b05292a1b8a7e0cb216cfee92a02f66fa60f40239881a"} Dec 11 09:10:04 crc kubenswrapper[4881]: I1211 09:10:04.609748 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" podStartSLOduration=2.326181603 podStartE2EDuration="3.609729167s" podCreationTimestamp="2025-12-11 09:10:01 +0000 UTC" firstStartedPulling="2025-12-11 09:10:02.61907878 +0000 UTC m=+3250.996447477" lastFinishedPulling="2025-12-11 09:10:03.902626344 +0000 UTC m=+3252.279995041" observedRunningTime="2025-12-11 09:10:04.604302814 +0000 UTC m=+3252.981671511" watchObservedRunningTime="2025-12-11 09:10:04.609729167 +0000 UTC m=+3252.987097854" Dec 11 09:10:13 crc kubenswrapper[4881]: I1211 09:10:13.014097 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:10:13 crc kubenswrapper[4881]: E1211 09:10:13.014926 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:10:18 crc kubenswrapper[4881]: I1211 09:10:18.748938 4881 generic.go:334] "Generic (PLEG): container finished" podID="7afa083c-c63d-4f07-9a8f-15b00a918860" containerID="d0ec4ed254492c21ad1b05292a1b8a7e0cb216cfee92a02f66fa60f40239881a" exitCode=0 Dec 11 09:10:18 crc kubenswrapper[4881]: I1211 09:10:18.749057 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" 
event={"ID":"7afa083c-c63d-4f07-9a8f-15b00a918860","Type":"ContainerDied","Data":"d0ec4ed254492c21ad1b05292a1b8a7e0cb216cfee92a02f66fa60f40239881a"} Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.231529 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.381943 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-ssh-key\") pod \"7afa083c-c63d-4f07-9a8f-15b00a918860\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.382381 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-logging-compute-config-data-1\") pod \"7afa083c-c63d-4f07-9a8f-15b00a918860\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.382402 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8d5vv\" (UniqueName: \"kubernetes.io/projected/7afa083c-c63d-4f07-9a8f-15b00a918860-kube-api-access-8d5vv\") pod \"7afa083c-c63d-4f07-9a8f-15b00a918860\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.382518 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-inventory\") pod \"7afa083c-c63d-4f07-9a8f-15b00a918860\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.382604 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-logging-compute-config-data-0\") pod \"7afa083c-c63d-4f07-9a8f-15b00a918860\" (UID: \"7afa083c-c63d-4f07-9a8f-15b00a918860\") " Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.388057 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7afa083c-c63d-4f07-9a8f-15b00a918860-kube-api-access-8d5vv" (OuterVolumeSpecName: "kube-api-access-8d5vv") pod "7afa083c-c63d-4f07-9a8f-15b00a918860" (UID: "7afa083c-c63d-4f07-9a8f-15b00a918860"). InnerVolumeSpecName "kube-api-access-8d5vv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.420182 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7afa083c-c63d-4f07-9a8f-15b00a918860" (UID: "7afa083c-c63d-4f07-9a8f-15b00a918860"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.420559 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-logging-compute-config-data-1" (OuterVolumeSpecName: "logging-compute-config-data-1") pod "7afa083c-c63d-4f07-9a8f-15b00a918860" (UID: "7afa083c-c63d-4f07-9a8f-15b00a918860"). InnerVolumeSpecName "logging-compute-config-data-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.420572 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-logging-compute-config-data-0" (OuterVolumeSpecName: "logging-compute-config-data-0") pod "7afa083c-c63d-4f07-9a8f-15b00a918860" (UID: "7afa083c-c63d-4f07-9a8f-15b00a918860"). InnerVolumeSpecName "logging-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.422938 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-inventory" (OuterVolumeSpecName: "inventory") pod "7afa083c-c63d-4f07-9a8f-15b00a918860" (UID: "7afa083c-c63d-4f07-9a8f-15b00a918860"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.485401 4881 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-logging-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.485449 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.485464 4881 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-logging-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.485478 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8d5vv\" (UniqueName: \"kubernetes.io/projected/7afa083c-c63d-4f07-9a8f-15b00a918860-kube-api-access-8d5vv\") on node \"crc\" DevicePath \"\"" Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.485490 4881 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7afa083c-c63d-4f07-9a8f-15b00a918860-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.771346 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" event={"ID":"7afa083c-c63d-4f07-9a8f-15b00a918860","Type":"ContainerDied","Data":"b5438f331cfb37b98cea6175031ff7ec3172e50a64157981e3d09ca1f8dd9534"} Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.771392 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b5438f331cfb37b98cea6175031ff7ec3172e50a64157981e3d09ca1f8dd9534" Dec 11 09:10:20 crc kubenswrapper[4881]: I1211 09:10:20.771724 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-wqjnk" Dec 11 09:10:28 crc kubenswrapper[4881]: I1211 09:10:28.005913 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:10:28 crc kubenswrapper[4881]: E1211 09:10:28.006866 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:10:39 crc kubenswrapper[4881]: I1211 09:10:39.006499 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:10:39 crc kubenswrapper[4881]: E1211 09:10:39.007388 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:10:53 crc kubenswrapper[4881]: I1211 09:10:53.012775 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:10:53 crc kubenswrapper[4881]: E1211 09:10:53.014824 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:11:04 crc kubenswrapper[4881]: I1211 09:11:04.005568 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:11:04 crc kubenswrapper[4881]: E1211 09:11:04.006493 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:11:16 crc kubenswrapper[4881]: I1211 09:11:16.005622 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:11:16 crc kubenswrapper[4881]: E1211 09:11:16.007695 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:11:27 crc kubenswrapper[4881]: I1211 09:11:27.005707 4881 scope.go:117] "RemoveContainer" 
containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:11:27 crc kubenswrapper[4881]: E1211 09:11:27.006701 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:11:40 crc kubenswrapper[4881]: I1211 09:11:40.006113 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:11:40 crc kubenswrapper[4881]: E1211 09:11:40.006802 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:11:41 crc kubenswrapper[4881]: I1211 09:11:41.837685 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gnf6l"] Dec 11 09:11:41 crc kubenswrapper[4881]: E1211 09:11:41.848282 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7afa083c-c63d-4f07-9a8f-15b00a918860" containerName="logging-edpm-deployment-openstack-edpm-ipam" Dec 11 09:11:41 crc kubenswrapper[4881]: I1211 09:11:41.848303 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="7afa083c-c63d-4f07-9a8f-15b00a918860" containerName="logging-edpm-deployment-openstack-edpm-ipam" Dec 11 09:11:41 crc kubenswrapper[4881]: I1211 09:11:41.848562 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="7afa083c-c63d-4f07-9a8f-15b00a918860" containerName="logging-edpm-deployment-openstack-edpm-ipam" Dec 11 09:11:41 crc kubenswrapper[4881]: I1211 09:11:41.850374 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gnf6l" Dec 11 09:11:41 crc kubenswrapper[4881]: I1211 09:11:41.851287 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gnf6l"] Dec 11 09:11:41 crc kubenswrapper[4881]: I1211 09:11:41.991306 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-utilities\") pod \"redhat-operators-gnf6l\" (UID: \"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb\") " pod="openshift-marketplace/redhat-operators-gnf6l" Dec 11 09:11:41 crc kubenswrapper[4881]: I1211 09:11:41.991599 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8bt6\" (UniqueName: \"kubernetes.io/projected/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-kube-api-access-l8bt6\") pod \"redhat-operators-gnf6l\" (UID: \"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb\") " pod="openshift-marketplace/redhat-operators-gnf6l" Dec 11 09:11:41 crc kubenswrapper[4881]: I1211 09:11:41.991763 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-catalog-content\") pod \"redhat-operators-gnf6l\" (UID: \"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb\") " pod="openshift-marketplace/redhat-operators-gnf6l" Dec 11 09:11:42 crc kubenswrapper[4881]: I1211 09:11:42.094646 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-utilities\") pod \"redhat-operators-gnf6l\" (UID: \"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb\") " pod="openshift-marketplace/redhat-operators-gnf6l" Dec 11 09:11:42 crc kubenswrapper[4881]: I1211 09:11:42.094691 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bt6\" (UniqueName: \"kubernetes.io/projected/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-kube-api-access-l8bt6\") pod \"redhat-operators-gnf6l\" (UID: \"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb\") " pod="openshift-marketplace/redhat-operators-gnf6l" Dec 11 09:11:42 crc kubenswrapper[4881]: I1211 09:11:42.094774 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-catalog-content\") pod \"redhat-operators-gnf6l\" (UID: \"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb\") " pod="openshift-marketplace/redhat-operators-gnf6l" Dec 11 09:11:42 crc kubenswrapper[4881]: I1211 09:11:42.095283 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-utilities\") pod \"redhat-operators-gnf6l\" (UID: \"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb\") " pod="openshift-marketplace/redhat-operators-gnf6l" Dec 11 09:11:42 crc kubenswrapper[4881]: I1211 09:11:42.095654 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-catalog-content\") pod \"redhat-operators-gnf6l\" (UID: \"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb\") " pod="openshift-marketplace/redhat-operators-gnf6l" Dec 11 09:11:42 crc kubenswrapper[4881]: I1211 09:11:42.116798 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-l8bt6\" (UniqueName: \"kubernetes.io/projected/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-kube-api-access-l8bt6\") pod \"redhat-operators-gnf6l\" (UID: \"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb\") " pod="openshift-marketplace/redhat-operators-gnf6l" Dec 11 09:11:42 crc kubenswrapper[4881]: I1211 09:11:42.209467 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gnf6l" Dec 11 09:11:42 crc kubenswrapper[4881]: I1211 09:11:42.686460 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gnf6l"] Dec 11 09:11:42 crc kubenswrapper[4881]: I1211 09:11:42.785765 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gnf6l" event={"ID":"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb","Type":"ContainerStarted","Data":"53987a53e2292655f617d3353d1853496b5b7fe01c096cf5db4a5fa2d480dcd5"} Dec 11 09:11:43 crc kubenswrapper[4881]: I1211 09:11:43.799106 4881 generic.go:334] "Generic (PLEG): container finished" podID="20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb" containerID="6fe49310b1b96c4f779e31b1813b95cb4aa4ddcdb2f32102b6764d2a4b996efd" exitCode=0 Dec 11 09:11:43 crc kubenswrapper[4881]: I1211 09:11:43.799196 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gnf6l" event={"ID":"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb","Type":"ContainerDied","Data":"6fe49310b1b96c4f779e31b1813b95cb4aa4ddcdb2f32102b6764d2a4b996efd"} Dec 11 09:11:44 crc kubenswrapper[4881]: I1211 09:11:44.812845 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gnf6l" event={"ID":"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb","Type":"ContainerStarted","Data":"d2043256e143fda82b4c728098809a9eee25cd9688bb5575bb42626bc61b16ab"} Dec 11 09:11:50 crc kubenswrapper[4881]: I1211 09:11:50.084591 4881 generic.go:334] "Generic (PLEG): container finished" podID="20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb" containerID="d2043256e143fda82b4c728098809a9eee25cd9688bb5575bb42626bc61b16ab" exitCode=0 Dec 11 09:11:50 crc kubenswrapper[4881]: I1211 09:11:50.084790 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gnf6l" event={"ID":"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb","Type":"ContainerDied","Data":"d2043256e143fda82b4c728098809a9eee25cd9688bb5575bb42626bc61b16ab"} Dec 11 09:11:51 crc kubenswrapper[4881]: I1211 09:11:51.107614 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gnf6l" event={"ID":"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb","Type":"ContainerStarted","Data":"f953d81fe25ec292faafddb2d295baf2bc3569cdc6a7ae3f6bbf9059f990fda0"} Dec 11 09:11:51 crc kubenswrapper[4881]: I1211 09:11:51.150157 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gnf6l" podStartSLOduration=3.416206562 podStartE2EDuration="10.150132616s" podCreationTimestamp="2025-12-11 09:11:41 +0000 UTC" firstStartedPulling="2025-12-11 09:11:43.801596256 +0000 UTC m=+3352.178964953" lastFinishedPulling="2025-12-11 09:11:50.53552231 +0000 UTC m=+3358.912891007" observedRunningTime="2025-12-11 09:11:51.141984789 +0000 UTC m=+3359.519353496" watchObservedRunningTime="2025-12-11 09:11:51.150132616 +0000 UTC m=+3359.527501323" Dec 11 09:11:52 crc kubenswrapper[4881]: I1211 09:11:52.210066 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gnf6l" Dec 11 
Dec 11 09:11:52 crc kubenswrapper[4881]: I1211 09:11:52.210130 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gnf6l"
Dec 11 09:11:53 crc kubenswrapper[4881]: I1211 09:11:53.255526 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gnf6l" podUID="20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb" containerName="registry-server" probeResult="failure" output=<
Dec 11 09:11:53 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s
Dec 11 09:11:53 crc kubenswrapper[4881]: >
Dec 11 09:11:54 crc kubenswrapper[4881]: I1211 09:11:54.005477 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1"
Dec 11 09:11:54 crc kubenswrapper[4881]: E1211 09:11:54.005973 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:12:02 crc kubenswrapper[4881]: I1211 09:12:02.258846 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gnf6l"
Dec 11 09:12:02 crc kubenswrapper[4881]: I1211 09:12:02.309729 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gnf6l"
Dec 11 09:12:02 crc kubenswrapper[4881]: I1211 09:12:02.498843 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gnf6l"]
Dec 11 09:12:04 crc kubenswrapper[4881]: I1211 09:12:04.242984 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gnf6l" podUID="20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb" containerName="registry-server" containerID="cri-o://f953d81fe25ec292faafddb2d295baf2bc3569cdc6a7ae3f6bbf9059f990fda0" gracePeriod=2
Dec 11 09:12:04 crc kubenswrapper[4881]: I1211 09:12:04.786401 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gnf6l"
Dec 11 09:12:04 crc kubenswrapper[4881]: I1211 09:12:04.905464 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8bt6\" (UniqueName: \"kubernetes.io/projected/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-kube-api-access-l8bt6\") pod \"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb\" (UID: \"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb\") "
Dec 11 09:12:04 crc kubenswrapper[4881]: I1211 09:12:04.905647 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-catalog-content\") pod \"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb\" (UID: \"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb\") "
Dec 11 09:12:04 crc kubenswrapper[4881]: I1211 09:12:04.905715 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-utilities\") pod \"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb\" (UID: \"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb\") "
Dec 11 09:12:04 crc kubenswrapper[4881]: I1211 09:12:04.906632 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-utilities" (OuterVolumeSpecName: "utilities") pod "20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb" (UID: "20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 09:12:04 crc kubenswrapper[4881]: I1211 09:12:04.913232 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-kube-api-access-l8bt6" (OuterVolumeSpecName: "kube-api-access-l8bt6") pod "20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb" (UID: "20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb"). InnerVolumeSpecName "kube-api-access-l8bt6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.005948 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1"
Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.008290 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8bt6\" (UniqueName: \"kubernetes.io/projected/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-kube-api-access-l8bt6\") on node \"crc\" DevicePath \"\""
Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.008318 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.012360 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb" (UID: "20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.110667 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.260315 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"5961927930adfb6a635fae221aae4cca63733e70ddb8264459aeb0e8385fdd51"} Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.264842 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gnf6l" Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.264761 4881 generic.go:334] "Generic (PLEG): container finished" podID="20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb" containerID="f953d81fe25ec292faafddb2d295baf2bc3569cdc6a7ae3f6bbf9059f990fda0" exitCode=0 Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.264847 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gnf6l" event={"ID":"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb","Type":"ContainerDied","Data":"f953d81fe25ec292faafddb2d295baf2bc3569cdc6a7ae3f6bbf9059f990fda0"} Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.266577 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gnf6l" event={"ID":"20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb","Type":"ContainerDied","Data":"53987a53e2292655f617d3353d1853496b5b7fe01c096cf5db4a5fa2d480dcd5"} Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.266700 4881 scope.go:117] "RemoveContainer" containerID="f953d81fe25ec292faafddb2d295baf2bc3569cdc6a7ae3f6bbf9059f990fda0" Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.296146 4881 scope.go:117] "RemoveContainer" containerID="d2043256e143fda82b4c728098809a9eee25cd9688bb5575bb42626bc61b16ab" Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.335494 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gnf6l"] Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.357740 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gnf6l"] Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.361119 4881 scope.go:117] "RemoveContainer" containerID="6fe49310b1b96c4f779e31b1813b95cb4aa4ddcdb2f32102b6764d2a4b996efd" Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.392650 4881 scope.go:117] "RemoveContainer" containerID="f953d81fe25ec292faafddb2d295baf2bc3569cdc6a7ae3f6bbf9059f990fda0" Dec 11 09:12:05 crc kubenswrapper[4881]: E1211 09:12:05.393421 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f953d81fe25ec292faafddb2d295baf2bc3569cdc6a7ae3f6bbf9059f990fda0\": container with ID starting with f953d81fe25ec292faafddb2d295baf2bc3569cdc6a7ae3f6bbf9059f990fda0 not found: ID does not exist" containerID="f953d81fe25ec292faafddb2d295baf2bc3569cdc6a7ae3f6bbf9059f990fda0" Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.393475 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f953d81fe25ec292faafddb2d295baf2bc3569cdc6a7ae3f6bbf9059f990fda0"} err="failed to get container status 
\"f953d81fe25ec292faafddb2d295baf2bc3569cdc6a7ae3f6bbf9059f990fda0\": rpc error: code = NotFound desc = could not find container \"f953d81fe25ec292faafddb2d295baf2bc3569cdc6a7ae3f6bbf9059f990fda0\": container with ID starting with f953d81fe25ec292faafddb2d295baf2bc3569cdc6a7ae3f6bbf9059f990fda0 not found: ID does not exist" Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.393563 4881 scope.go:117] "RemoveContainer" containerID="d2043256e143fda82b4c728098809a9eee25cd9688bb5575bb42626bc61b16ab" Dec 11 09:12:05 crc kubenswrapper[4881]: E1211 09:12:05.393918 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2043256e143fda82b4c728098809a9eee25cd9688bb5575bb42626bc61b16ab\": container with ID starting with d2043256e143fda82b4c728098809a9eee25cd9688bb5575bb42626bc61b16ab not found: ID does not exist" containerID="d2043256e143fda82b4c728098809a9eee25cd9688bb5575bb42626bc61b16ab" Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.393953 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2043256e143fda82b4c728098809a9eee25cd9688bb5575bb42626bc61b16ab"} err="failed to get container status \"d2043256e143fda82b4c728098809a9eee25cd9688bb5575bb42626bc61b16ab\": rpc error: code = NotFound desc = could not find container \"d2043256e143fda82b4c728098809a9eee25cd9688bb5575bb42626bc61b16ab\": container with ID starting with d2043256e143fda82b4c728098809a9eee25cd9688bb5575bb42626bc61b16ab not found: ID does not exist" Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.393975 4881 scope.go:117] "RemoveContainer" containerID="6fe49310b1b96c4f779e31b1813b95cb4aa4ddcdb2f32102b6764d2a4b996efd" Dec 11 09:12:05 crc kubenswrapper[4881]: E1211 09:12:05.394762 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6fe49310b1b96c4f779e31b1813b95cb4aa4ddcdb2f32102b6764d2a4b996efd\": container with ID starting with 6fe49310b1b96c4f779e31b1813b95cb4aa4ddcdb2f32102b6764d2a4b996efd not found: ID does not exist" containerID="6fe49310b1b96c4f779e31b1813b95cb4aa4ddcdb2f32102b6764d2a4b996efd" Dec 11 09:12:05 crc kubenswrapper[4881]: I1211 09:12:05.394801 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fe49310b1b96c4f779e31b1813b95cb4aa4ddcdb2f32102b6764d2a4b996efd"} err="failed to get container status \"6fe49310b1b96c4f779e31b1813b95cb4aa4ddcdb2f32102b6764d2a4b996efd\": rpc error: code = NotFound desc = could not find container \"6fe49310b1b96c4f779e31b1813b95cb4aa4ddcdb2f32102b6764d2a4b996efd\": container with ID starting with 6fe49310b1b96c4f779e31b1813b95cb4aa4ddcdb2f32102b6764d2a4b996efd not found: ID does not exist" Dec 11 09:12:07 crc kubenswrapper[4881]: I1211 09:12:07.019274 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb" path="/var/lib/kubelet/pods/20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb/volumes" Dec 11 09:13:04 crc kubenswrapper[4881]: I1211 09:13:04.489966 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hwftf"] Dec 11 09:13:04 crc kubenswrapper[4881]: E1211 09:13:04.491125 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb" containerName="extract-utilities" Dec 11 09:13:04 crc kubenswrapper[4881]: I1211 09:13:04.491143 4881 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb" containerName="extract-utilities" Dec 11 09:13:04 crc kubenswrapper[4881]: E1211 09:13:04.491165 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb" containerName="registry-server" Dec 11 09:13:04 crc kubenswrapper[4881]: I1211 09:13:04.491177 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb" containerName="registry-server" Dec 11 09:13:04 crc kubenswrapper[4881]: E1211 09:13:04.491198 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb" containerName="extract-content" Dec 11 09:13:04 crc kubenswrapper[4881]: I1211 09:13:04.491206 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb" containerName="extract-content" Dec 11 09:13:04 crc kubenswrapper[4881]: I1211 09:13:04.491576 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="20b4c4bc-86b2-4b8a-abe3-8a33d7ce24cb" containerName="registry-server" Dec 11 09:13:04 crc kubenswrapper[4881]: I1211 09:13:04.493817 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hwftf" Dec 11 09:13:04 crc kubenswrapper[4881]: I1211 09:13:04.501268 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hwftf"] Dec 11 09:13:04 crc kubenswrapper[4881]: I1211 09:13:04.611081 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e313c473-47d0-4680-b24c-d21950163bc0-utilities\") pod \"redhat-marketplace-hwftf\" (UID: \"e313c473-47d0-4680-b24c-d21950163bc0\") " pod="openshift-marketplace/redhat-marketplace-hwftf" Dec 11 09:13:04 crc kubenswrapper[4881]: I1211 09:13:04.611188 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e313c473-47d0-4680-b24c-d21950163bc0-catalog-content\") pod \"redhat-marketplace-hwftf\" (UID: \"e313c473-47d0-4680-b24c-d21950163bc0\") " pod="openshift-marketplace/redhat-marketplace-hwftf" Dec 11 09:13:04 crc kubenswrapper[4881]: I1211 09:13:04.611490 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxd64\" (UniqueName: \"kubernetes.io/projected/e313c473-47d0-4680-b24c-d21950163bc0-kube-api-access-xxd64\") pod \"redhat-marketplace-hwftf\" (UID: \"e313c473-47d0-4680-b24c-d21950163bc0\") " pod="openshift-marketplace/redhat-marketplace-hwftf" Dec 11 09:13:04 crc kubenswrapper[4881]: I1211 09:13:04.714470 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e313c473-47d0-4680-b24c-d21950163bc0-utilities\") pod \"redhat-marketplace-hwftf\" (UID: \"e313c473-47d0-4680-b24c-d21950163bc0\") " pod="openshift-marketplace/redhat-marketplace-hwftf" Dec 11 09:13:04 crc kubenswrapper[4881]: I1211 09:13:04.714661 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e313c473-47d0-4680-b24c-d21950163bc0-catalog-content\") pod \"redhat-marketplace-hwftf\" (UID: \"e313c473-47d0-4680-b24c-d21950163bc0\") " pod="openshift-marketplace/redhat-marketplace-hwftf" Dec 11 09:13:04 crc kubenswrapper[4881]: I1211 09:13:04.714900 4881 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e313c473-47d0-4680-b24c-d21950163bc0-utilities\") pod \"redhat-marketplace-hwftf\" (UID: \"e313c473-47d0-4680-b24c-d21950163bc0\") " pod="openshift-marketplace/redhat-marketplace-hwftf" Dec 11 09:13:04 crc kubenswrapper[4881]: I1211 09:13:04.715097 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxd64\" (UniqueName: \"kubernetes.io/projected/e313c473-47d0-4680-b24c-d21950163bc0-kube-api-access-xxd64\") pod \"redhat-marketplace-hwftf\" (UID: \"e313c473-47d0-4680-b24c-d21950163bc0\") " pod="openshift-marketplace/redhat-marketplace-hwftf" Dec 11 09:13:04 crc kubenswrapper[4881]: I1211 09:13:04.715202 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e313c473-47d0-4680-b24c-d21950163bc0-catalog-content\") pod \"redhat-marketplace-hwftf\" (UID: \"e313c473-47d0-4680-b24c-d21950163bc0\") " pod="openshift-marketplace/redhat-marketplace-hwftf" Dec 11 09:13:04 crc kubenswrapper[4881]: I1211 09:13:04.740001 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxd64\" (UniqueName: \"kubernetes.io/projected/e313c473-47d0-4680-b24c-d21950163bc0-kube-api-access-xxd64\") pod \"redhat-marketplace-hwftf\" (UID: \"e313c473-47d0-4680-b24c-d21950163bc0\") " pod="openshift-marketplace/redhat-marketplace-hwftf" Dec 11 09:13:04 crc kubenswrapper[4881]: I1211 09:13:04.832718 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hwftf" Dec 11 09:13:05 crc kubenswrapper[4881]: I1211 09:13:05.381584 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hwftf"] Dec 11 09:13:05 crc kubenswrapper[4881]: I1211 09:13:05.980962 4881 generic.go:334] "Generic (PLEG): container finished" podID="e313c473-47d0-4680-b24c-d21950163bc0" containerID="c024982f6e42cfb720ab7a032eeae80db727dbb06dd43a19a7adaa9c8e91a740" exitCode=0 Dec 11 09:13:05 crc kubenswrapper[4881]: I1211 09:13:05.981231 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwftf" event={"ID":"e313c473-47d0-4680-b24c-d21950163bc0","Type":"ContainerDied","Data":"c024982f6e42cfb720ab7a032eeae80db727dbb06dd43a19a7adaa9c8e91a740"} Dec 11 09:13:05 crc kubenswrapper[4881]: I1211 09:13:05.981269 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwftf" event={"ID":"e313c473-47d0-4680-b24c-d21950163bc0","Type":"ContainerStarted","Data":"152be775a4d2472cbff3ba8f24c7bc5c38dbbd548027b9d254292f35f0ff2536"} Dec 11 09:13:09 crc kubenswrapper[4881]: I1211 09:13:09.024744 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwftf" event={"ID":"e313c473-47d0-4680-b24c-d21950163bc0","Type":"ContainerStarted","Data":"c90ed84507c2bfd5427d1250fd1cb71b532822a1782eb2a60a49362c2badd99a"} Dec 11 09:13:10 crc kubenswrapper[4881]: I1211 09:13:10.029739 4881 generic.go:334] "Generic (PLEG): container finished" podID="e313c473-47d0-4680-b24c-d21950163bc0" containerID="c90ed84507c2bfd5427d1250fd1cb71b532822a1782eb2a60a49362c2badd99a" exitCode=0 Dec 11 09:13:10 crc kubenswrapper[4881]: I1211 09:13:10.029843 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwftf" 
event={"ID":"e313c473-47d0-4680-b24c-d21950163bc0","Type":"ContainerDied","Data":"c90ed84507c2bfd5427d1250fd1cb71b532822a1782eb2a60a49362c2badd99a"} Dec 11 09:13:12 crc kubenswrapper[4881]: I1211 09:13:12.052032 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwftf" event={"ID":"e313c473-47d0-4680-b24c-d21950163bc0","Type":"ContainerStarted","Data":"0a2ef0b6f0627717d3184344105ca49ae22c7e9963fc7585831e81828ea48466"} Dec 11 09:13:12 crc kubenswrapper[4881]: I1211 09:13:12.084785 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hwftf" podStartSLOduration=2.437391891 podStartE2EDuration="8.08476084s" podCreationTimestamp="2025-12-11 09:13:04 +0000 UTC" firstStartedPulling="2025-12-11 09:13:05.98302592 +0000 UTC m=+3434.360394617" lastFinishedPulling="2025-12-11 09:13:11.630394869 +0000 UTC m=+3440.007763566" observedRunningTime="2025-12-11 09:13:12.070605226 +0000 UTC m=+3440.447973933" watchObservedRunningTime="2025-12-11 09:13:12.08476084 +0000 UTC m=+3440.462129537" Dec 11 09:13:14 crc kubenswrapper[4881]: I1211 09:13:14.833709 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hwftf" Dec 11 09:13:14 crc kubenswrapper[4881]: I1211 09:13:14.834370 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hwftf" Dec 11 09:13:14 crc kubenswrapper[4881]: I1211 09:13:14.884952 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hwftf" Dec 11 09:13:24 crc kubenswrapper[4881]: I1211 09:13:24.888578 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hwftf" Dec 11 09:13:24 crc kubenswrapper[4881]: I1211 09:13:24.939041 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hwftf"] Dec 11 09:13:25 crc kubenswrapper[4881]: I1211 09:13:25.199105 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-hwftf" podUID="e313c473-47d0-4680-b24c-d21950163bc0" containerName="registry-server" containerID="cri-o://0a2ef0b6f0627717d3184344105ca49ae22c7e9963fc7585831e81828ea48466" gracePeriod=2 Dec 11 09:13:25 crc kubenswrapper[4881]: I1211 09:13:25.723506 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hwftf" Dec 11 09:13:25 crc kubenswrapper[4881]: I1211 09:13:25.853129 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e313c473-47d0-4680-b24c-d21950163bc0-catalog-content\") pod \"e313c473-47d0-4680-b24c-d21950163bc0\" (UID: \"e313c473-47d0-4680-b24c-d21950163bc0\") " Dec 11 09:13:25 crc kubenswrapper[4881]: I1211 09:13:25.853714 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxd64\" (UniqueName: \"kubernetes.io/projected/e313c473-47d0-4680-b24c-d21950163bc0-kube-api-access-xxd64\") pod \"e313c473-47d0-4680-b24c-d21950163bc0\" (UID: \"e313c473-47d0-4680-b24c-d21950163bc0\") " Dec 11 09:13:25 crc kubenswrapper[4881]: I1211 09:13:25.853892 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e313c473-47d0-4680-b24c-d21950163bc0-utilities\") pod \"e313c473-47d0-4680-b24c-d21950163bc0\" (UID: \"e313c473-47d0-4680-b24c-d21950163bc0\") " Dec 11 09:13:25 crc kubenswrapper[4881]: I1211 09:13:25.854613 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e313c473-47d0-4680-b24c-d21950163bc0-utilities" (OuterVolumeSpecName: "utilities") pod "e313c473-47d0-4680-b24c-d21950163bc0" (UID: "e313c473-47d0-4680-b24c-d21950163bc0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:13:25 crc kubenswrapper[4881]: I1211 09:13:25.859508 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e313c473-47d0-4680-b24c-d21950163bc0-kube-api-access-xxd64" (OuterVolumeSpecName: "kube-api-access-xxd64") pod "e313c473-47d0-4680-b24c-d21950163bc0" (UID: "e313c473-47d0-4680-b24c-d21950163bc0"). InnerVolumeSpecName "kube-api-access-xxd64". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:13:25 crc kubenswrapper[4881]: I1211 09:13:25.877923 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e313c473-47d0-4680-b24c-d21950163bc0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e313c473-47d0-4680-b24c-d21950163bc0" (UID: "e313c473-47d0-4680-b24c-d21950163bc0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:13:25 crc kubenswrapper[4881]: I1211 09:13:25.957007 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e313c473-47d0-4680-b24c-d21950163bc0-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:13:25 crc kubenswrapper[4881]: I1211 09:13:25.958213 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e313c473-47d0-4680-b24c-d21950163bc0-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:13:25 crc kubenswrapper[4881]: I1211 09:13:25.958292 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxd64\" (UniqueName: \"kubernetes.io/projected/e313c473-47d0-4680-b24c-d21950163bc0-kube-api-access-xxd64\") on node \"crc\" DevicePath \"\"" Dec 11 09:13:26 crc kubenswrapper[4881]: I1211 09:13:26.212010 4881 generic.go:334] "Generic (PLEG): container finished" podID="e313c473-47d0-4680-b24c-d21950163bc0" containerID="0a2ef0b6f0627717d3184344105ca49ae22c7e9963fc7585831e81828ea48466" exitCode=0 Dec 11 09:13:26 crc kubenswrapper[4881]: I1211 09:13:26.212054 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hwftf" Dec 11 09:13:26 crc kubenswrapper[4881]: I1211 09:13:26.212061 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwftf" event={"ID":"e313c473-47d0-4680-b24c-d21950163bc0","Type":"ContainerDied","Data":"0a2ef0b6f0627717d3184344105ca49ae22c7e9963fc7585831e81828ea48466"} Dec 11 09:13:26 crc kubenswrapper[4881]: I1211 09:13:26.212093 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwftf" event={"ID":"e313c473-47d0-4680-b24c-d21950163bc0","Type":"ContainerDied","Data":"152be775a4d2472cbff3ba8f24c7bc5c38dbbd548027b9d254292f35f0ff2536"} Dec 11 09:13:26 crc kubenswrapper[4881]: I1211 09:13:26.212123 4881 scope.go:117] "RemoveContainer" containerID="0a2ef0b6f0627717d3184344105ca49ae22c7e9963fc7585831e81828ea48466" Dec 11 09:13:26 crc kubenswrapper[4881]: I1211 09:13:26.241981 4881 scope.go:117] "RemoveContainer" containerID="c90ed84507c2bfd5427d1250fd1cb71b532822a1782eb2a60a49362c2badd99a" Dec 11 09:13:26 crc kubenswrapper[4881]: I1211 09:13:26.272551 4881 scope.go:117] "RemoveContainer" containerID="c024982f6e42cfb720ab7a032eeae80db727dbb06dd43a19a7adaa9c8e91a740" Dec 11 09:13:26 crc kubenswrapper[4881]: I1211 09:13:26.304519 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hwftf"] Dec 11 09:13:26 crc kubenswrapper[4881]: I1211 09:13:26.329615 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-hwftf"] Dec 11 09:13:26 crc kubenswrapper[4881]: I1211 09:13:26.397519 4881 scope.go:117] "RemoveContainer" containerID="0a2ef0b6f0627717d3184344105ca49ae22c7e9963fc7585831e81828ea48466" Dec 11 09:13:26 crc kubenswrapper[4881]: E1211 09:13:26.418604 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a2ef0b6f0627717d3184344105ca49ae22c7e9963fc7585831e81828ea48466\": container with ID starting with 0a2ef0b6f0627717d3184344105ca49ae22c7e9963fc7585831e81828ea48466 not found: ID does not exist" containerID="0a2ef0b6f0627717d3184344105ca49ae22c7e9963fc7585831e81828ea48466" Dec 11 09:13:26 crc kubenswrapper[4881]: I1211 09:13:26.418652 4881 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a2ef0b6f0627717d3184344105ca49ae22c7e9963fc7585831e81828ea48466"} err="failed to get container status \"0a2ef0b6f0627717d3184344105ca49ae22c7e9963fc7585831e81828ea48466\": rpc error: code = NotFound desc = could not find container \"0a2ef0b6f0627717d3184344105ca49ae22c7e9963fc7585831e81828ea48466\": container with ID starting with 0a2ef0b6f0627717d3184344105ca49ae22c7e9963fc7585831e81828ea48466 not found: ID does not exist" Dec 11 09:13:26 crc kubenswrapper[4881]: I1211 09:13:26.418679 4881 scope.go:117] "RemoveContainer" containerID="c90ed84507c2bfd5427d1250fd1cb71b532822a1782eb2a60a49362c2badd99a" Dec 11 09:13:26 crc kubenswrapper[4881]: E1211 09:13:26.424460 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c90ed84507c2bfd5427d1250fd1cb71b532822a1782eb2a60a49362c2badd99a\": container with ID starting with c90ed84507c2bfd5427d1250fd1cb71b532822a1782eb2a60a49362c2badd99a not found: ID does not exist" containerID="c90ed84507c2bfd5427d1250fd1cb71b532822a1782eb2a60a49362c2badd99a" Dec 11 09:13:26 crc kubenswrapper[4881]: I1211 09:13:26.424504 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c90ed84507c2bfd5427d1250fd1cb71b532822a1782eb2a60a49362c2badd99a"} err="failed to get container status \"c90ed84507c2bfd5427d1250fd1cb71b532822a1782eb2a60a49362c2badd99a\": rpc error: code = NotFound desc = could not find container \"c90ed84507c2bfd5427d1250fd1cb71b532822a1782eb2a60a49362c2badd99a\": container with ID starting with c90ed84507c2bfd5427d1250fd1cb71b532822a1782eb2a60a49362c2badd99a not found: ID does not exist" Dec 11 09:13:26 crc kubenswrapper[4881]: I1211 09:13:26.424528 4881 scope.go:117] "RemoveContainer" containerID="c024982f6e42cfb720ab7a032eeae80db727dbb06dd43a19a7adaa9c8e91a740" Dec 11 09:13:26 crc kubenswrapper[4881]: E1211 09:13:26.428439 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c024982f6e42cfb720ab7a032eeae80db727dbb06dd43a19a7adaa9c8e91a740\": container with ID starting with c024982f6e42cfb720ab7a032eeae80db727dbb06dd43a19a7adaa9c8e91a740 not found: ID does not exist" containerID="c024982f6e42cfb720ab7a032eeae80db727dbb06dd43a19a7adaa9c8e91a740" Dec 11 09:13:26 crc kubenswrapper[4881]: I1211 09:13:26.428476 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c024982f6e42cfb720ab7a032eeae80db727dbb06dd43a19a7adaa9c8e91a740"} err="failed to get container status \"c024982f6e42cfb720ab7a032eeae80db727dbb06dd43a19a7adaa9c8e91a740\": rpc error: code = NotFound desc = could not find container \"c024982f6e42cfb720ab7a032eeae80db727dbb06dd43a19a7adaa9c8e91a740\": container with ID starting with c024982f6e42cfb720ab7a032eeae80db727dbb06dd43a19a7adaa9c8e91a740 not found: ID does not exist" Dec 11 09:13:27 crc kubenswrapper[4881]: I1211 09:13:27.019520 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e313c473-47d0-4680-b24c-d21950163bc0" path="/var/lib/kubelet/pods/e313c473-47d0-4680-b24c-d21950163bc0/volumes" Dec 11 09:14:29 crc kubenswrapper[4881]: I1211 09:14:29.397316 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:14:29 crc kubenswrapper[4881]: I1211 09:14:29.397955 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:14:45 crc kubenswrapper[4881]: I1211 09:14:45.366138 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-577q9"] Dec 11 09:14:45 crc kubenswrapper[4881]: E1211 09:14:45.368388 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e313c473-47d0-4680-b24c-d21950163bc0" containerName="extract-utilities" Dec 11 09:14:45 crc kubenswrapper[4881]: I1211 09:14:45.368501 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e313c473-47d0-4680-b24c-d21950163bc0" containerName="extract-utilities" Dec 11 09:14:45 crc kubenswrapper[4881]: E1211 09:14:45.368595 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e313c473-47d0-4680-b24c-d21950163bc0" containerName="extract-content" Dec 11 09:14:45 crc kubenswrapper[4881]: I1211 09:14:45.368678 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e313c473-47d0-4680-b24c-d21950163bc0" containerName="extract-content" Dec 11 09:14:45 crc kubenswrapper[4881]: E1211 09:14:45.368801 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e313c473-47d0-4680-b24c-d21950163bc0" containerName="registry-server" Dec 11 09:14:45 crc kubenswrapper[4881]: I1211 09:14:45.369598 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e313c473-47d0-4680-b24c-d21950163bc0" containerName="registry-server" Dec 11 09:14:45 crc kubenswrapper[4881]: I1211 09:14:45.370538 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="e313c473-47d0-4680-b24c-d21950163bc0" containerName="registry-server" Dec 11 09:14:45 crc kubenswrapper[4881]: I1211 09:14:45.373123 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-577q9" Dec 11 09:14:45 crc kubenswrapper[4881]: I1211 09:14:45.381097 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-577q9"] Dec 11 09:14:45 crc kubenswrapper[4881]: I1211 09:14:45.399850 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2627q\" (UniqueName: \"kubernetes.io/projected/151afd27-7265-4465-aa9c-82edbebc9c7d-kube-api-access-2627q\") pod \"certified-operators-577q9\" (UID: \"151afd27-7265-4465-aa9c-82edbebc9c7d\") " pod="openshift-marketplace/certified-operators-577q9" Dec 11 09:14:45 crc kubenswrapper[4881]: I1211 09:14:45.399996 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/151afd27-7265-4465-aa9c-82edbebc9c7d-utilities\") pod \"certified-operators-577q9\" (UID: \"151afd27-7265-4465-aa9c-82edbebc9c7d\") " pod="openshift-marketplace/certified-operators-577q9" Dec 11 09:14:45 crc kubenswrapper[4881]: I1211 09:14:45.400605 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/151afd27-7265-4465-aa9c-82edbebc9c7d-catalog-content\") pod \"certified-operators-577q9\" (UID: \"151afd27-7265-4465-aa9c-82edbebc9c7d\") " pod="openshift-marketplace/certified-operators-577q9" Dec 11 09:14:45 crc kubenswrapper[4881]: I1211 09:14:45.503679 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/151afd27-7265-4465-aa9c-82edbebc9c7d-catalog-content\") pod \"certified-operators-577q9\" (UID: \"151afd27-7265-4465-aa9c-82edbebc9c7d\") " pod="openshift-marketplace/certified-operators-577q9" Dec 11 09:14:45 crc kubenswrapper[4881]: I1211 09:14:45.504075 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2627q\" (UniqueName: \"kubernetes.io/projected/151afd27-7265-4465-aa9c-82edbebc9c7d-kube-api-access-2627q\") pod \"certified-operators-577q9\" (UID: \"151afd27-7265-4465-aa9c-82edbebc9c7d\") " pod="openshift-marketplace/certified-operators-577q9" Dec 11 09:14:45 crc kubenswrapper[4881]: I1211 09:14:45.504112 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/151afd27-7265-4465-aa9c-82edbebc9c7d-utilities\") pod \"certified-operators-577q9\" (UID: \"151afd27-7265-4465-aa9c-82edbebc9c7d\") " pod="openshift-marketplace/certified-operators-577q9" Dec 11 09:14:45 crc kubenswrapper[4881]: I1211 09:14:45.504661 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/151afd27-7265-4465-aa9c-82edbebc9c7d-utilities\") pod \"certified-operators-577q9\" (UID: \"151afd27-7265-4465-aa9c-82edbebc9c7d\") " pod="openshift-marketplace/certified-operators-577q9" Dec 11 09:14:45 crc kubenswrapper[4881]: I1211 09:14:45.504662 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/151afd27-7265-4465-aa9c-82edbebc9c7d-catalog-content\") pod \"certified-operators-577q9\" (UID: \"151afd27-7265-4465-aa9c-82edbebc9c7d\") " pod="openshift-marketplace/certified-operators-577q9" Dec 11 09:14:45 crc kubenswrapper[4881]: I1211 09:14:45.524882 4881 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-2627q\" (UniqueName: \"kubernetes.io/projected/151afd27-7265-4465-aa9c-82edbebc9c7d-kube-api-access-2627q\") pod \"certified-operators-577q9\" (UID: \"151afd27-7265-4465-aa9c-82edbebc9c7d\") " pod="openshift-marketplace/certified-operators-577q9" Dec 11 09:14:45 crc kubenswrapper[4881]: I1211 09:14:45.698991 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-577q9" Dec 11 09:14:46 crc kubenswrapper[4881]: I1211 09:14:46.194388 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-577q9"] Dec 11 09:14:47 crc kubenswrapper[4881]: I1211 09:14:47.130619 4881 generic.go:334] "Generic (PLEG): container finished" podID="151afd27-7265-4465-aa9c-82edbebc9c7d" containerID="eb551fd928da084a03fcb7f0ed8c8b1a3d028010582a75969576ed0101a3114b" exitCode=0 Dec 11 09:14:47 crc kubenswrapper[4881]: I1211 09:14:47.130697 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-577q9" event={"ID":"151afd27-7265-4465-aa9c-82edbebc9c7d","Type":"ContainerDied","Data":"eb551fd928da084a03fcb7f0ed8c8b1a3d028010582a75969576ed0101a3114b"} Dec 11 09:14:47 crc kubenswrapper[4881]: I1211 09:14:47.130954 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-577q9" event={"ID":"151afd27-7265-4465-aa9c-82edbebc9c7d","Type":"ContainerStarted","Data":"4c4f5343a66e6f504164711d681bff94d0e14245ec440b0ea312e1688b92f75b"} Dec 11 09:14:47 crc kubenswrapper[4881]: I1211 09:14:47.133428 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 09:14:49 crc kubenswrapper[4881]: I1211 09:14:49.162741 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-577q9" event={"ID":"151afd27-7265-4465-aa9c-82edbebc9c7d","Type":"ContainerStarted","Data":"96615fa6e6f5ca0503e52c456ed19e60f8e81b2f31b9d0a9ef5e7675090cdc6f"} Dec 11 09:14:50 crc kubenswrapper[4881]: I1211 09:14:50.176767 4881 generic.go:334] "Generic (PLEG): container finished" podID="151afd27-7265-4465-aa9c-82edbebc9c7d" containerID="96615fa6e6f5ca0503e52c456ed19e60f8e81b2f31b9d0a9ef5e7675090cdc6f" exitCode=0 Dec 11 09:14:50 crc kubenswrapper[4881]: I1211 09:14:50.176866 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-577q9" event={"ID":"151afd27-7265-4465-aa9c-82edbebc9c7d","Type":"ContainerDied","Data":"96615fa6e6f5ca0503e52c456ed19e60f8e81b2f31b9d0a9ef5e7675090cdc6f"} Dec 11 09:14:53 crc kubenswrapper[4881]: I1211 09:14:53.209517 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-577q9" event={"ID":"151afd27-7265-4465-aa9c-82edbebc9c7d","Type":"ContainerStarted","Data":"9780aaba2676aa164b183056e36bfdcea6fe4f2855b66ab1e2ba0163a61a6038"} Dec 11 09:14:53 crc kubenswrapper[4881]: I1211 09:14:53.229385 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-577q9" podStartSLOduration=3.323059514 podStartE2EDuration="8.229365181s" podCreationTimestamp="2025-12-11 09:14:45 +0000 UTC" firstStartedPulling="2025-12-11 09:14:47.133161755 +0000 UTC m=+3535.510530442" lastFinishedPulling="2025-12-11 09:14:52.039467412 +0000 UTC m=+3540.416836109" observedRunningTime="2025-12-11 09:14:53.225349794 +0000 UTC m=+3541.602718491" watchObservedRunningTime="2025-12-11 
09:14:53.229365181 +0000 UTC m=+3541.606733878" Dec 11 09:14:55 crc kubenswrapper[4881]: I1211 09:14:55.699650 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-577q9" Dec 11 09:14:55 crc kubenswrapper[4881]: I1211 09:14:55.700314 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-577q9" Dec 11 09:14:56 crc kubenswrapper[4881]: I1211 09:14:56.745938 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-577q9" podUID="151afd27-7265-4465-aa9c-82edbebc9c7d" containerName="registry-server" probeResult="failure" output=< Dec 11 09:14:56 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 09:14:56 crc kubenswrapper[4881]: > Dec 11 09:14:59 crc kubenswrapper[4881]: I1211 09:14:59.397549 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:14:59 crc kubenswrapper[4881]: I1211 09:14:59.398514 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:15:00 crc kubenswrapper[4881]: I1211 09:15:00.179956 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6"] Dec 11 09:15:00 crc kubenswrapper[4881]: I1211 09:15:00.182532 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" Dec 11 09:15:00 crc kubenswrapper[4881]: I1211 09:15:00.186796 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 11 09:15:00 crc kubenswrapper[4881]: I1211 09:15:00.187198 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 11 09:15:00 crc kubenswrapper[4881]: I1211 09:15:00.211039 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6"] Dec 11 09:15:00 crc kubenswrapper[4881]: I1211 09:15:00.268003 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4caeb70a-1169-4614-861a-5b7ec55986cc-config-volume\") pod \"collect-profiles-29424075-ntxg6\" (UID: \"4caeb70a-1169-4614-861a-5b7ec55986cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" Dec 11 09:15:00 crc kubenswrapper[4881]: I1211 09:15:00.268076 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4caeb70a-1169-4614-861a-5b7ec55986cc-secret-volume\") pod \"collect-profiles-29424075-ntxg6\" (UID: \"4caeb70a-1169-4614-861a-5b7ec55986cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" Dec 11 09:15:00 crc kubenswrapper[4881]: I1211 09:15:00.268237 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2n6m\" (UniqueName: \"kubernetes.io/projected/4caeb70a-1169-4614-861a-5b7ec55986cc-kube-api-access-n2n6m\") pod \"collect-profiles-29424075-ntxg6\" (UID: \"4caeb70a-1169-4614-861a-5b7ec55986cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" Dec 11 09:15:00 crc kubenswrapper[4881]: I1211 09:15:00.370197 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2n6m\" (UniqueName: \"kubernetes.io/projected/4caeb70a-1169-4614-861a-5b7ec55986cc-kube-api-access-n2n6m\") pod \"collect-profiles-29424075-ntxg6\" (UID: \"4caeb70a-1169-4614-861a-5b7ec55986cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" Dec 11 09:15:00 crc kubenswrapper[4881]: I1211 09:15:00.371688 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4caeb70a-1169-4614-861a-5b7ec55986cc-config-volume\") pod \"collect-profiles-29424075-ntxg6\" (UID: \"4caeb70a-1169-4614-861a-5b7ec55986cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" Dec 11 09:15:00 crc kubenswrapper[4881]: I1211 09:15:00.371923 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4caeb70a-1169-4614-861a-5b7ec55986cc-secret-volume\") pod \"collect-profiles-29424075-ntxg6\" (UID: \"4caeb70a-1169-4614-861a-5b7ec55986cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" Dec 11 09:15:00 crc kubenswrapper[4881]: I1211 09:15:00.372993 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4caeb70a-1169-4614-861a-5b7ec55986cc-config-volume\") pod 
\"collect-profiles-29424075-ntxg6\" (UID: \"4caeb70a-1169-4614-861a-5b7ec55986cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" Dec 11 09:15:00 crc kubenswrapper[4881]: I1211 09:15:00.380684 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4caeb70a-1169-4614-861a-5b7ec55986cc-secret-volume\") pod \"collect-profiles-29424075-ntxg6\" (UID: \"4caeb70a-1169-4614-861a-5b7ec55986cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" Dec 11 09:15:00 crc kubenswrapper[4881]: I1211 09:15:00.391755 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2n6m\" (UniqueName: \"kubernetes.io/projected/4caeb70a-1169-4614-861a-5b7ec55986cc-kube-api-access-n2n6m\") pod \"collect-profiles-29424075-ntxg6\" (UID: \"4caeb70a-1169-4614-861a-5b7ec55986cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" Dec 11 09:15:00 crc kubenswrapper[4881]: I1211 09:15:00.515027 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" Dec 11 09:15:01 crc kubenswrapper[4881]: I1211 09:15:01.039828 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6"] Dec 11 09:15:01 crc kubenswrapper[4881]: W1211 09:15:01.039952 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4caeb70a_1169_4614_861a_5b7ec55986cc.slice/crio-dd6318e52cf91aa8d6fe46dda85e84b6392ee9340e1889828ab965b3f072b402 WatchSource:0}: Error finding container dd6318e52cf91aa8d6fe46dda85e84b6392ee9340e1889828ab965b3f072b402: Status 404 returned error can't find the container with id dd6318e52cf91aa8d6fe46dda85e84b6392ee9340e1889828ab965b3f072b402 Dec 11 09:15:01 crc kubenswrapper[4881]: I1211 09:15:01.330737 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" event={"ID":"4caeb70a-1169-4614-861a-5b7ec55986cc","Type":"ContainerStarted","Data":"8d743313d6835e3a6a7e88318cf902dadc4a8e6d7f290d31a14d0bdd3ee49f55"} Dec 11 09:15:01 crc kubenswrapper[4881]: I1211 09:15:01.331032 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" event={"ID":"4caeb70a-1169-4614-861a-5b7ec55986cc","Type":"ContainerStarted","Data":"dd6318e52cf91aa8d6fe46dda85e84b6392ee9340e1889828ab965b3f072b402"} Dec 11 09:15:01 crc kubenswrapper[4881]: I1211 09:15:01.358007 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" podStartSLOduration=1.357983401 podStartE2EDuration="1.357983401s" podCreationTimestamp="2025-12-11 09:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 09:15:01.35095992 +0000 UTC m=+3549.728328617" watchObservedRunningTime="2025-12-11 09:15:01.357983401 +0000 UTC m=+3549.735352098" Dec 11 09:15:02 crc kubenswrapper[4881]: I1211 09:15:02.346148 4881 generic.go:334] "Generic (PLEG): container finished" podID="4caeb70a-1169-4614-861a-5b7ec55986cc" containerID="8d743313d6835e3a6a7e88318cf902dadc4a8e6d7f290d31a14d0bdd3ee49f55" exitCode=0 Dec 11 09:15:02 crc kubenswrapper[4881]: I1211 09:15:02.346269 
4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" event={"ID":"4caeb70a-1169-4614-861a-5b7ec55986cc","Type":"ContainerDied","Data":"8d743313d6835e3a6a7e88318cf902dadc4a8e6d7f290d31a14d0bdd3ee49f55"} Dec 11 09:15:03 crc kubenswrapper[4881]: I1211 09:15:03.792745 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" Dec 11 09:15:03 crc kubenswrapper[4881]: I1211 09:15:03.864399 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4caeb70a-1169-4614-861a-5b7ec55986cc-secret-volume\") pod \"4caeb70a-1169-4614-861a-5b7ec55986cc\" (UID: \"4caeb70a-1169-4614-861a-5b7ec55986cc\") " Dec 11 09:15:03 crc kubenswrapper[4881]: I1211 09:15:03.864494 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2n6m\" (UniqueName: \"kubernetes.io/projected/4caeb70a-1169-4614-861a-5b7ec55986cc-kube-api-access-n2n6m\") pod \"4caeb70a-1169-4614-861a-5b7ec55986cc\" (UID: \"4caeb70a-1169-4614-861a-5b7ec55986cc\") " Dec 11 09:15:03 crc kubenswrapper[4881]: I1211 09:15:03.864622 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4caeb70a-1169-4614-861a-5b7ec55986cc-config-volume\") pod \"4caeb70a-1169-4614-861a-5b7ec55986cc\" (UID: \"4caeb70a-1169-4614-861a-5b7ec55986cc\") " Dec 11 09:15:03 crc kubenswrapper[4881]: I1211 09:15:03.865964 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4caeb70a-1169-4614-861a-5b7ec55986cc-config-volume" (OuterVolumeSpecName: "config-volume") pod "4caeb70a-1169-4614-861a-5b7ec55986cc" (UID: "4caeb70a-1169-4614-861a-5b7ec55986cc"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 09:15:03 crc kubenswrapper[4881]: I1211 09:15:03.871185 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4caeb70a-1169-4614-861a-5b7ec55986cc-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4caeb70a-1169-4614-861a-5b7ec55986cc" (UID: "4caeb70a-1169-4614-861a-5b7ec55986cc"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:15:03 crc kubenswrapper[4881]: I1211 09:15:03.871786 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4caeb70a-1169-4614-861a-5b7ec55986cc-kube-api-access-n2n6m" (OuterVolumeSpecName: "kube-api-access-n2n6m") pod "4caeb70a-1169-4614-861a-5b7ec55986cc" (UID: "4caeb70a-1169-4614-861a-5b7ec55986cc"). InnerVolumeSpecName "kube-api-access-n2n6m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:15:03 crc kubenswrapper[4881]: I1211 09:15:03.967601 4881 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4caeb70a-1169-4614-861a-5b7ec55986cc-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 11 09:15:03 crc kubenswrapper[4881]: I1211 09:15:03.967946 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2n6m\" (UniqueName: \"kubernetes.io/projected/4caeb70a-1169-4614-861a-5b7ec55986cc-kube-api-access-n2n6m\") on node \"crc\" DevicePath \"\"" Dec 11 09:15:03 crc kubenswrapper[4881]: I1211 09:15:03.967960 4881 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4caeb70a-1169-4614-861a-5b7ec55986cc-config-volume\") on node \"crc\" DevicePath \"\"" Dec 11 09:15:04 crc kubenswrapper[4881]: I1211 09:15:04.398801 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" event={"ID":"4caeb70a-1169-4614-861a-5b7ec55986cc","Type":"ContainerDied","Data":"dd6318e52cf91aa8d6fe46dda85e84b6392ee9340e1889828ab965b3f072b402"} Dec 11 09:15:04 crc kubenswrapper[4881]: I1211 09:15:04.398850 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd6318e52cf91aa8d6fe46dda85e84b6392ee9340e1889828ab965b3f072b402" Dec 11 09:15:04 crc kubenswrapper[4881]: I1211 09:15:04.398912 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6" Dec 11 09:15:04 crc kubenswrapper[4881]: I1211 09:15:04.464662 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr"] Dec 11 09:15:04 crc kubenswrapper[4881]: I1211 09:15:04.476067 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424030-hkdpr"] Dec 11 09:15:05 crc kubenswrapper[4881]: I1211 09:15:05.023878 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4cf927d-1b71-4715-9161-bed7cb184f26" path="/var/lib/kubelet/pods/e4cf927d-1b71-4715-9161-bed7cb184f26/volumes" Dec 11 09:15:05 crc kubenswrapper[4881]: I1211 09:15:05.749172 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-577q9" Dec 11 09:15:05 crc kubenswrapper[4881]: I1211 09:15:05.801732 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-577q9" Dec 11 09:15:05 crc kubenswrapper[4881]: I1211 09:15:05.995008 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-577q9"] Dec 11 09:15:07 crc kubenswrapper[4881]: I1211 09:15:07.430798 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-577q9" podUID="151afd27-7265-4465-aa9c-82edbebc9c7d" containerName="registry-server" containerID="cri-o://9780aaba2676aa164b183056e36bfdcea6fe4f2855b66ab1e2ba0163a61a6038" gracePeriod=2 Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.015054 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-577q9" Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.189693 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/151afd27-7265-4465-aa9c-82edbebc9c7d-catalog-content\") pod \"151afd27-7265-4465-aa9c-82edbebc9c7d\" (UID: \"151afd27-7265-4465-aa9c-82edbebc9c7d\") " Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.189788 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2627q\" (UniqueName: \"kubernetes.io/projected/151afd27-7265-4465-aa9c-82edbebc9c7d-kube-api-access-2627q\") pod \"151afd27-7265-4465-aa9c-82edbebc9c7d\" (UID: \"151afd27-7265-4465-aa9c-82edbebc9c7d\") " Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.189856 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/151afd27-7265-4465-aa9c-82edbebc9c7d-utilities\") pod \"151afd27-7265-4465-aa9c-82edbebc9c7d\" (UID: \"151afd27-7265-4465-aa9c-82edbebc9c7d\") " Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.190830 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/151afd27-7265-4465-aa9c-82edbebc9c7d-utilities" (OuterVolumeSpecName: "utilities") pod "151afd27-7265-4465-aa9c-82edbebc9c7d" (UID: "151afd27-7265-4465-aa9c-82edbebc9c7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.191071 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/151afd27-7265-4465-aa9c-82edbebc9c7d-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.206277 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/151afd27-7265-4465-aa9c-82edbebc9c7d-kube-api-access-2627q" (OuterVolumeSpecName: "kube-api-access-2627q") pod "151afd27-7265-4465-aa9c-82edbebc9c7d" (UID: "151afd27-7265-4465-aa9c-82edbebc9c7d"). InnerVolumeSpecName "kube-api-access-2627q". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.253286 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/151afd27-7265-4465-aa9c-82edbebc9c7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "151afd27-7265-4465-aa9c-82edbebc9c7d" (UID: "151afd27-7265-4465-aa9c-82edbebc9c7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.294035 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/151afd27-7265-4465-aa9c-82edbebc9c7d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.294111 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2627q\" (UniqueName: \"kubernetes.io/projected/151afd27-7265-4465-aa9c-82edbebc9c7d-kube-api-access-2627q\") on node \"crc\" DevicePath \"\"" Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.442737 4881 generic.go:334] "Generic (PLEG): container finished" podID="151afd27-7265-4465-aa9c-82edbebc9c7d" containerID="9780aaba2676aa164b183056e36bfdcea6fe4f2855b66ab1e2ba0163a61a6038" exitCode=0 Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.442786 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-577q9" event={"ID":"151afd27-7265-4465-aa9c-82edbebc9c7d","Type":"ContainerDied","Data":"9780aaba2676aa164b183056e36bfdcea6fe4f2855b66ab1e2ba0163a61a6038"} Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.442816 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-577q9" event={"ID":"151afd27-7265-4465-aa9c-82edbebc9c7d","Type":"ContainerDied","Data":"4c4f5343a66e6f504164711d681bff94d0e14245ec440b0ea312e1688b92f75b"} Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.442834 4881 scope.go:117] "RemoveContainer" containerID="9780aaba2676aa164b183056e36bfdcea6fe4f2855b66ab1e2ba0163a61a6038" Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.442985 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-577q9" Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.470238 4881 scope.go:117] "RemoveContainer" containerID="96615fa6e6f5ca0503e52c456ed19e60f8e81b2f31b9d0a9ef5e7675090cdc6f" Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.490996 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-577q9"] Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.502588 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-577q9"] Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.503270 4881 scope.go:117] "RemoveContainer" containerID="eb551fd928da084a03fcb7f0ed8c8b1a3d028010582a75969576ed0101a3114b" Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.559622 4881 scope.go:117] "RemoveContainer" containerID="9780aaba2676aa164b183056e36bfdcea6fe4f2855b66ab1e2ba0163a61a6038" Dec 11 09:15:08 crc kubenswrapper[4881]: E1211 09:15:08.560042 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9780aaba2676aa164b183056e36bfdcea6fe4f2855b66ab1e2ba0163a61a6038\": container with ID starting with 9780aaba2676aa164b183056e36bfdcea6fe4f2855b66ab1e2ba0163a61a6038 not found: ID does not exist" containerID="9780aaba2676aa164b183056e36bfdcea6fe4f2855b66ab1e2ba0163a61a6038" Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.560082 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9780aaba2676aa164b183056e36bfdcea6fe4f2855b66ab1e2ba0163a61a6038"} err="failed to get container status \"9780aaba2676aa164b183056e36bfdcea6fe4f2855b66ab1e2ba0163a61a6038\": rpc error: code = NotFound desc = could not find container \"9780aaba2676aa164b183056e36bfdcea6fe4f2855b66ab1e2ba0163a61a6038\": container with ID starting with 9780aaba2676aa164b183056e36bfdcea6fe4f2855b66ab1e2ba0163a61a6038 not found: ID does not exist" Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.560109 4881 scope.go:117] "RemoveContainer" containerID="96615fa6e6f5ca0503e52c456ed19e60f8e81b2f31b9d0a9ef5e7675090cdc6f" Dec 11 09:15:08 crc kubenswrapper[4881]: E1211 09:15:08.560628 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96615fa6e6f5ca0503e52c456ed19e60f8e81b2f31b9d0a9ef5e7675090cdc6f\": container with ID starting with 96615fa6e6f5ca0503e52c456ed19e60f8e81b2f31b9d0a9ef5e7675090cdc6f not found: ID does not exist" containerID="96615fa6e6f5ca0503e52c456ed19e60f8e81b2f31b9d0a9ef5e7675090cdc6f" Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.560658 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96615fa6e6f5ca0503e52c456ed19e60f8e81b2f31b9d0a9ef5e7675090cdc6f"} err="failed to get container status \"96615fa6e6f5ca0503e52c456ed19e60f8e81b2f31b9d0a9ef5e7675090cdc6f\": rpc error: code = NotFound desc = could not find container \"96615fa6e6f5ca0503e52c456ed19e60f8e81b2f31b9d0a9ef5e7675090cdc6f\": container with ID starting with 96615fa6e6f5ca0503e52c456ed19e60f8e81b2f31b9d0a9ef5e7675090cdc6f not found: ID does not exist" Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.560676 4881 scope.go:117] "RemoveContainer" containerID="eb551fd928da084a03fcb7f0ed8c8b1a3d028010582a75969576ed0101a3114b" Dec 11 09:15:08 crc kubenswrapper[4881]: E1211 09:15:08.561161 4881 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"eb551fd928da084a03fcb7f0ed8c8b1a3d028010582a75969576ed0101a3114b\": container with ID starting with eb551fd928da084a03fcb7f0ed8c8b1a3d028010582a75969576ed0101a3114b not found: ID does not exist" containerID="eb551fd928da084a03fcb7f0ed8c8b1a3d028010582a75969576ed0101a3114b" Dec 11 09:15:08 crc kubenswrapper[4881]: I1211 09:15:08.561198 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb551fd928da084a03fcb7f0ed8c8b1a3d028010582a75969576ed0101a3114b"} err="failed to get container status \"eb551fd928da084a03fcb7f0ed8c8b1a3d028010582a75969576ed0101a3114b\": rpc error: code = NotFound desc = could not find container \"eb551fd928da084a03fcb7f0ed8c8b1a3d028010582a75969576ed0101a3114b\": container with ID starting with eb551fd928da084a03fcb7f0ed8c8b1a3d028010582a75969576ed0101a3114b not found: ID does not exist" Dec 11 09:15:08 crc kubenswrapper[4881]: E1211 09:15:08.645407 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod151afd27_7265_4465_aa9c_82edbebc9c7d.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod151afd27_7265_4465_aa9c_82edbebc9c7d.slice/crio-4c4f5343a66e6f504164711d681bff94d0e14245ec440b0ea312e1688b92f75b\": RecentStats: unable to find data in memory cache]" Dec 11 09:15:09 crc kubenswrapper[4881]: I1211 09:15:09.025555 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="151afd27-7265-4465-aa9c-82edbebc9c7d" path="/var/lib/kubelet/pods/151afd27-7265-4465-aa9c-82edbebc9c7d/volumes" Dec 11 09:15:11 crc kubenswrapper[4881]: I1211 09:15:11.462634 4881 scope.go:117] "RemoveContainer" containerID="984a7e4b52ad1536d0f9e36ebb4356f86ee31dfb21c39ed2b083d921637aefe2" Dec 11 09:15:29 crc kubenswrapper[4881]: I1211 09:15:29.396616 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:15:29 crc kubenswrapper[4881]: I1211 09:15:29.397027 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:15:29 crc kubenswrapper[4881]: I1211 09:15:29.397063 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 09:15:29 crc kubenswrapper[4881]: I1211 09:15:29.397991 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5961927930adfb6a635fae221aae4cca63733e70ddb8264459aeb0e8385fdd51"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 09:15:29 crc kubenswrapper[4881]: I1211 09:15:29.398046 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" 
podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://5961927930adfb6a635fae221aae4cca63733e70ddb8264459aeb0e8385fdd51" gracePeriod=600 Dec 11 09:15:29 crc kubenswrapper[4881]: I1211 09:15:29.700428 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="5961927930adfb6a635fae221aae4cca63733e70ddb8264459aeb0e8385fdd51" exitCode=0 Dec 11 09:15:29 crc kubenswrapper[4881]: I1211 09:15:29.700494 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"5961927930adfb6a635fae221aae4cca63733e70ddb8264459aeb0e8385fdd51"} Dec 11 09:15:29 crc kubenswrapper[4881]: I1211 09:15:29.701029 4881 scope.go:117] "RemoveContainer" containerID="003d013ffd5b740337ff0333759338b1b3fbe344072684ee96e763f5cd66a8c1" Dec 11 09:15:30 crc kubenswrapper[4881]: I1211 09:15:30.726167 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc"} Dec 11 09:16:11 crc kubenswrapper[4881]: I1211 09:16:11.565562 4881 scope.go:117] "RemoveContainer" containerID="53db5558de3f1dedb50dce15e1f42deeaa836eecd441e0504816794291de60c2" Dec 11 09:16:11 crc kubenswrapper[4881]: I1211 09:16:11.587792 4881 scope.go:117] "RemoveContainer" containerID="78ebf124e22d0e42c4d38788b0e351fa4a0decea10d3518b5b13c80575d5f519" Dec 11 09:16:11 crc kubenswrapper[4881]: I1211 09:16:11.615422 4881 scope.go:117] "RemoveContainer" containerID="24238b78ae8a03ba9d908a6ac2eeeb37d213d8ac305f9b1bade544825ac920d1" Dec 11 09:17:29 crc kubenswrapper[4881]: I1211 09:17:29.396769 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:17:29 crc kubenswrapper[4881]: I1211 09:17:29.397781 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:17:59 crc kubenswrapper[4881]: I1211 09:17:59.396861 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:17:59 crc kubenswrapper[4881]: I1211 09:17:59.397557 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:18:29 crc kubenswrapper[4881]: I1211 09:18:29.397824 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe 
status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:18:29 crc kubenswrapper[4881]: I1211 09:18:29.398589 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:18:29 crc kubenswrapper[4881]: I1211 09:18:29.398645 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 09:18:29 crc kubenswrapper[4881]: I1211 09:18:29.399718 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 09:18:29 crc kubenswrapper[4881]: I1211 09:18:29.399775 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" gracePeriod=600 Dec 11 09:18:29 crc kubenswrapper[4881]: E1211 09:18:29.535351 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:18:29 crc kubenswrapper[4881]: I1211 09:18:29.988862 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" exitCode=0 Dec 11 09:18:29 crc kubenswrapper[4881]: I1211 09:18:29.988929 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc"} Dec 11 09:18:29 crc kubenswrapper[4881]: I1211 09:18:29.988971 4881 scope.go:117] "RemoveContainer" containerID="5961927930adfb6a635fae221aae4cca63733e70ddb8264459aeb0e8385fdd51" Dec 11 09:18:29 crc kubenswrapper[4881]: I1211 09:18:29.990695 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:18:29 crc kubenswrapper[4881]: E1211 09:18:29.992949 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:18:42 crc kubenswrapper[4881]: I1211 09:18:42.006079 
4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:18:42 crc kubenswrapper[4881]: E1211 09:18:42.006959 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:18:54 crc kubenswrapper[4881]: I1211 09:18:54.005609 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:18:54 crc kubenswrapper[4881]: E1211 09:18:54.006364 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:19:07 crc kubenswrapper[4881]: I1211 09:19:07.006265 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:19:07 crc kubenswrapper[4881]: E1211 09:19:07.006999 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:19:20 crc kubenswrapper[4881]: I1211 09:19:20.006163 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:19:20 crc kubenswrapper[4881]: E1211 09:19:20.007029 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:19:35 crc kubenswrapper[4881]: I1211 09:19:35.005760 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:19:35 crc kubenswrapper[4881]: E1211 09:19:35.007023 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:19:49 crc kubenswrapper[4881]: I1211 09:19:49.005892 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:19:49 crc kubenswrapper[4881]: E1211 09:19:49.006803 4881 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:20:04 crc kubenswrapper[4881]: I1211 09:20:04.006033 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:20:04 crc kubenswrapper[4881]: E1211 09:20:04.006749 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:20:17 crc kubenswrapper[4881]: I1211 09:20:17.005538 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:20:17 crc kubenswrapper[4881]: E1211 09:20:17.006255 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:20:32 crc kubenswrapper[4881]: I1211 09:20:32.005320 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:20:32 crc kubenswrapper[4881]: E1211 09:20:32.006186 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:20:45 crc kubenswrapper[4881]: I1211 09:20:45.005930 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:20:45 crc kubenswrapper[4881]: E1211 09:20:45.006848 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:20:59 crc kubenswrapper[4881]: I1211 09:20:59.005871 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:20:59 crc kubenswrapper[4881]: E1211 09:20:59.006617 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:21:01 crc kubenswrapper[4881]: I1211 09:21:01.502679 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ironic-operator-controller-manager-54485f899-mvlsx" podUID="2fd323b1-8fa8-456c-bcd8-d89872682762" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 11 09:21:01 crc kubenswrapper[4881]: I1211 09:21:01.947685 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd" podUID="05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.114:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 11 09:21:01 crc kubenswrapper[4881]: I1211 09:21:01.947950 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-2dxhd" podUID="05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.114:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 11 09:21:12 crc kubenswrapper[4881]: I1211 09:21:12.006188 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:21:12 crc kubenswrapper[4881]: E1211 09:21:12.007049 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:21:26 crc kubenswrapper[4881]: I1211 09:21:26.006425 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:21:26 crc kubenswrapper[4881]: E1211 09:21:26.007220 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:21:37 crc kubenswrapper[4881]: I1211 09:21:37.005727 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:21:37 crc kubenswrapper[4881]: E1211 09:21:37.006464 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:21:50 crc kubenswrapper[4881]: 
I1211 09:21:50.006034 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:21:50 crc kubenswrapper[4881]: E1211 09:21:50.006963 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.390397 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-b926c"] Dec 11 09:21:53 crc kubenswrapper[4881]: E1211 09:21:53.391528 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4caeb70a-1169-4614-861a-5b7ec55986cc" containerName="collect-profiles" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.391553 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="4caeb70a-1169-4614-861a-5b7ec55986cc" containerName="collect-profiles" Dec 11 09:21:53 crc kubenswrapper[4881]: E1211 09:21:53.391593 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="151afd27-7265-4465-aa9c-82edbebc9c7d" containerName="extract-content" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.391601 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="151afd27-7265-4465-aa9c-82edbebc9c7d" containerName="extract-content" Dec 11 09:21:53 crc kubenswrapper[4881]: E1211 09:21:53.391647 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="151afd27-7265-4465-aa9c-82edbebc9c7d" containerName="registry-server" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.391656 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="151afd27-7265-4465-aa9c-82edbebc9c7d" containerName="registry-server" Dec 11 09:21:53 crc kubenswrapper[4881]: E1211 09:21:53.391689 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="151afd27-7265-4465-aa9c-82edbebc9c7d" containerName="extract-utilities" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.391697 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="151afd27-7265-4465-aa9c-82edbebc9c7d" containerName="extract-utilities" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.391974 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="151afd27-7265-4465-aa9c-82edbebc9c7d" containerName="registry-server" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.392031 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="4caeb70a-1169-4614-861a-5b7ec55986cc" containerName="collect-profiles" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.394100 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b926c" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.409351 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b926c"] Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.500200 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-catalog-content\") pod \"redhat-operators-b926c\" (UID: \"06b368e6-9e7a-4398-9f8f-d7c14bb29e10\") " pod="openshift-marketplace/redhat-operators-b926c" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.500298 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ksnh\" (UniqueName: \"kubernetes.io/projected/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-kube-api-access-5ksnh\") pod \"redhat-operators-b926c\" (UID: \"06b368e6-9e7a-4398-9f8f-d7c14bb29e10\") " pod="openshift-marketplace/redhat-operators-b926c" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.500456 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-utilities\") pod \"redhat-operators-b926c\" (UID: \"06b368e6-9e7a-4398-9f8f-d7c14bb29e10\") " pod="openshift-marketplace/redhat-operators-b926c" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.605785 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-catalog-content\") pod \"redhat-operators-b926c\" (UID: \"06b368e6-9e7a-4398-9f8f-d7c14bb29e10\") " pod="openshift-marketplace/redhat-operators-b926c" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.605875 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ksnh\" (UniqueName: \"kubernetes.io/projected/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-kube-api-access-5ksnh\") pod \"redhat-operators-b926c\" (UID: \"06b368e6-9e7a-4398-9f8f-d7c14bb29e10\") " pod="openshift-marketplace/redhat-operators-b926c" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.605984 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-utilities\") pod \"redhat-operators-b926c\" (UID: \"06b368e6-9e7a-4398-9f8f-d7c14bb29e10\") " pod="openshift-marketplace/redhat-operators-b926c" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.606530 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-catalog-content\") pod \"redhat-operators-b926c\" (UID: \"06b368e6-9e7a-4398-9f8f-d7c14bb29e10\") " pod="openshift-marketplace/redhat-operators-b926c" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.606568 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-utilities\") pod \"redhat-operators-b926c\" (UID: \"06b368e6-9e7a-4398-9f8f-d7c14bb29e10\") " pod="openshift-marketplace/redhat-operators-b926c" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.630955 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-5ksnh\" (UniqueName: \"kubernetes.io/projected/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-kube-api-access-5ksnh\") pod \"redhat-operators-b926c\" (UID: \"06b368e6-9e7a-4398-9f8f-d7c14bb29e10\") " pod="openshift-marketplace/redhat-operators-b926c" Dec 11 09:21:53 crc kubenswrapper[4881]: I1211 09:21:53.718121 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b926c" Dec 11 09:21:54 crc kubenswrapper[4881]: I1211 09:21:54.297164 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b926c"] Dec 11 09:21:55 crc kubenswrapper[4881]: I1211 09:21:55.352563 4881 generic.go:334] "Generic (PLEG): container finished" podID="06b368e6-9e7a-4398-9f8f-d7c14bb29e10" containerID="1c118a343d6b0b2fffbe69a0c38e0d658d2444724ec6c85569d3b42f2e63c858" exitCode=0 Dec 11 09:21:55 crc kubenswrapper[4881]: I1211 09:21:55.352733 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b926c" event={"ID":"06b368e6-9e7a-4398-9f8f-d7c14bb29e10","Type":"ContainerDied","Data":"1c118a343d6b0b2fffbe69a0c38e0d658d2444724ec6c85569d3b42f2e63c858"} Dec 11 09:21:55 crc kubenswrapper[4881]: I1211 09:21:55.353072 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b926c" event={"ID":"06b368e6-9e7a-4398-9f8f-d7c14bb29e10","Type":"ContainerStarted","Data":"82aa9e99ea4bf024d92cd318a3f76997c802a04156093582ef2a3ab709098710"} Dec 11 09:21:55 crc kubenswrapper[4881]: I1211 09:21:55.355095 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 09:21:56 crc kubenswrapper[4881]: I1211 09:21:56.784256 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4d9h9"] Dec 11 09:21:56 crc kubenswrapper[4881]: I1211 09:21:56.787159 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4d9h9" Dec 11 09:21:56 crc kubenswrapper[4881]: I1211 09:21:56.814696 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4d9h9"] Dec 11 09:21:56 crc kubenswrapper[4881]: I1211 09:21:56.897892 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a30d0370-88ca-48de-b318-91089702cebe-utilities\") pod \"community-operators-4d9h9\" (UID: \"a30d0370-88ca-48de-b318-91089702cebe\") " pod="openshift-marketplace/community-operators-4d9h9" Dec 11 09:21:56 crc kubenswrapper[4881]: I1211 09:21:56.898035 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9ww7\" (UniqueName: \"kubernetes.io/projected/a30d0370-88ca-48de-b318-91089702cebe-kube-api-access-x9ww7\") pod \"community-operators-4d9h9\" (UID: \"a30d0370-88ca-48de-b318-91089702cebe\") " pod="openshift-marketplace/community-operators-4d9h9" Dec 11 09:21:56 crc kubenswrapper[4881]: I1211 09:21:56.898180 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a30d0370-88ca-48de-b318-91089702cebe-catalog-content\") pod \"community-operators-4d9h9\" (UID: \"a30d0370-88ca-48de-b318-91089702cebe\") " pod="openshift-marketplace/community-operators-4d9h9" Dec 11 09:21:57 crc kubenswrapper[4881]: I1211 09:21:57.000536 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a30d0370-88ca-48de-b318-91089702cebe-catalog-content\") pod \"community-operators-4d9h9\" (UID: \"a30d0370-88ca-48de-b318-91089702cebe\") " pod="openshift-marketplace/community-operators-4d9h9" Dec 11 09:21:57 crc kubenswrapper[4881]: I1211 09:21:57.000865 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a30d0370-88ca-48de-b318-91089702cebe-utilities\") pod \"community-operators-4d9h9\" (UID: \"a30d0370-88ca-48de-b318-91089702cebe\") " pod="openshift-marketplace/community-operators-4d9h9" Dec 11 09:21:57 crc kubenswrapper[4881]: I1211 09:21:57.000970 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9ww7\" (UniqueName: \"kubernetes.io/projected/a30d0370-88ca-48de-b318-91089702cebe-kube-api-access-x9ww7\") pod \"community-operators-4d9h9\" (UID: \"a30d0370-88ca-48de-b318-91089702cebe\") " pod="openshift-marketplace/community-operators-4d9h9" Dec 11 09:21:57 crc kubenswrapper[4881]: I1211 09:21:57.001101 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a30d0370-88ca-48de-b318-91089702cebe-catalog-content\") pod \"community-operators-4d9h9\" (UID: \"a30d0370-88ca-48de-b318-91089702cebe\") " pod="openshift-marketplace/community-operators-4d9h9" Dec 11 09:21:57 crc kubenswrapper[4881]: I1211 09:21:57.001360 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a30d0370-88ca-48de-b318-91089702cebe-utilities\") pod \"community-operators-4d9h9\" (UID: \"a30d0370-88ca-48de-b318-91089702cebe\") " pod="openshift-marketplace/community-operators-4d9h9" Dec 11 09:21:57 crc kubenswrapper[4881]: I1211 09:21:57.039548 4881 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-x9ww7\" (UniqueName: \"kubernetes.io/projected/a30d0370-88ca-48de-b318-91089702cebe-kube-api-access-x9ww7\") pod \"community-operators-4d9h9\" (UID: \"a30d0370-88ca-48de-b318-91089702cebe\") " pod="openshift-marketplace/community-operators-4d9h9" Dec 11 09:21:57 crc kubenswrapper[4881]: I1211 09:21:57.108819 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4d9h9" Dec 11 09:21:57 crc kubenswrapper[4881]: I1211 09:21:57.391944 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b926c" event={"ID":"06b368e6-9e7a-4398-9f8f-d7c14bb29e10","Type":"ContainerStarted","Data":"e4ff090280aa3a23fc13d28d258cb06357ecffa88181a1f2c0d7194e3fa74822"} Dec 11 09:21:57 crc kubenswrapper[4881]: I1211 09:21:57.808648 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4d9h9"] Dec 11 09:21:58 crc kubenswrapper[4881]: I1211 09:21:58.404183 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4d9h9" event={"ID":"a30d0370-88ca-48de-b318-91089702cebe","Type":"ContainerStarted","Data":"eb214d425595539b12df6bc880ef223ae448a60f207608a9abe2f1e5cef17720"} Dec 11 09:21:59 crc kubenswrapper[4881]: I1211 09:21:59.419950 4881 generic.go:334] "Generic (PLEG): container finished" podID="a30d0370-88ca-48de-b318-91089702cebe" containerID="a59cf02efd1c45b96922e470245783bd548781a241844be2648d0a7efe074748" exitCode=0 Dec 11 09:21:59 crc kubenswrapper[4881]: I1211 09:21:59.420047 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4d9h9" event={"ID":"a30d0370-88ca-48de-b318-91089702cebe","Type":"ContainerDied","Data":"a59cf02efd1c45b96922e470245783bd548781a241844be2648d0a7efe074748"} Dec 11 09:22:02 crc kubenswrapper[4881]: I1211 09:22:02.497180 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4d9h9" event={"ID":"a30d0370-88ca-48de-b318-91089702cebe","Type":"ContainerStarted","Data":"37a5ac34a650d110bb82250c8ccc22e630412029a3d4b13e76a7c8d8d2b3b44a"} Dec 11 09:22:02 crc kubenswrapper[4881]: I1211 09:22:02.511252 4881 generic.go:334] "Generic (PLEG): container finished" podID="06b368e6-9e7a-4398-9f8f-d7c14bb29e10" containerID="e4ff090280aa3a23fc13d28d258cb06357ecffa88181a1f2c0d7194e3fa74822" exitCode=0 Dec 11 09:22:02 crc kubenswrapper[4881]: I1211 09:22:02.511321 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b926c" event={"ID":"06b368e6-9e7a-4398-9f8f-d7c14bb29e10","Type":"ContainerDied","Data":"e4ff090280aa3a23fc13d28d258cb06357ecffa88181a1f2c0d7194e3fa74822"} Dec 11 09:22:03 crc kubenswrapper[4881]: I1211 09:22:03.523842 4881 generic.go:334] "Generic (PLEG): container finished" podID="a30d0370-88ca-48de-b318-91089702cebe" containerID="37a5ac34a650d110bb82250c8ccc22e630412029a3d4b13e76a7c8d8d2b3b44a" exitCode=0 Dec 11 09:22:03 crc kubenswrapper[4881]: I1211 09:22:03.523955 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4d9h9" event={"ID":"a30d0370-88ca-48de-b318-91089702cebe","Type":"ContainerDied","Data":"37a5ac34a650d110bb82250c8ccc22e630412029a3d4b13e76a7c8d8d2b3b44a"} Dec 11 09:22:04 crc kubenswrapper[4881]: I1211 09:22:04.005949 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 
09:22:04 crc kubenswrapper[4881]: E1211 09:22:04.006564 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:22:04 crc kubenswrapper[4881]: I1211 09:22:04.541171 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b926c" event={"ID":"06b368e6-9e7a-4398-9f8f-d7c14bb29e10","Type":"ContainerStarted","Data":"37e8b359c251f1417be07aadce2062287e1d0225a892bf5809428b0cd9cbe386"} Dec 11 09:22:04 crc kubenswrapper[4881]: I1211 09:22:04.570930 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-b926c" podStartSLOduration=3.736233264 podStartE2EDuration="11.570885197s" podCreationTimestamp="2025-12-11 09:21:53 +0000 UTC" firstStartedPulling="2025-12-11 09:21:55.354805654 +0000 UTC m=+3963.732174341" lastFinishedPulling="2025-12-11 09:22:03.189457577 +0000 UTC m=+3971.566826274" observedRunningTime="2025-12-11 09:22:04.561112309 +0000 UTC m=+3972.938481036" watchObservedRunningTime="2025-12-11 09:22:04.570885197 +0000 UTC m=+3972.948253894" Dec 11 09:22:06 crc kubenswrapper[4881]: I1211 09:22:06.567669 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4d9h9" event={"ID":"a30d0370-88ca-48de-b318-91089702cebe","Type":"ContainerStarted","Data":"45a3629f5be4d2444e1a24a15771dcb37f4049dd3c3128a3455a67cefb2740cc"} Dec 11 09:22:06 crc kubenswrapper[4881]: I1211 09:22:06.594842 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4d9h9" podStartSLOduration=6.005046083 podStartE2EDuration="10.594817963s" podCreationTimestamp="2025-12-11 09:21:56 +0000 UTC" firstStartedPulling="2025-12-11 09:22:00.437416102 +0000 UTC m=+3968.814784789" lastFinishedPulling="2025-12-11 09:22:05.027187982 +0000 UTC m=+3973.404556669" observedRunningTime="2025-12-11 09:22:06.586781188 +0000 UTC m=+3974.964149885" watchObservedRunningTime="2025-12-11 09:22:06.594817963 +0000 UTC m=+3974.972186660" Dec 11 09:22:07 crc kubenswrapper[4881]: I1211 09:22:07.109103 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4d9h9" Dec 11 09:22:07 crc kubenswrapper[4881]: I1211 09:22:07.109671 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4d9h9" Dec 11 09:22:08 crc kubenswrapper[4881]: I1211 09:22:08.229710 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-4d9h9" podUID="a30d0370-88ca-48de-b318-91089702cebe" containerName="registry-server" probeResult="failure" output=< Dec 11 09:22:08 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 09:22:08 crc kubenswrapper[4881]: > Dec 11 09:22:13 crc kubenswrapper[4881]: I1211 09:22:13.718610 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-b926c" Dec 11 09:22:13 crc kubenswrapper[4881]: I1211 09:22:13.720012 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-operators-b926c" Dec 11 09:22:14 crc kubenswrapper[4881]: I1211 09:22:14.767771 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-b926c" podUID="06b368e6-9e7a-4398-9f8f-d7c14bb29e10" containerName="registry-server" probeResult="failure" output=< Dec 11 09:22:14 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 09:22:14 crc kubenswrapper[4881]: > Dec 11 09:22:16 crc kubenswrapper[4881]: I1211 09:22:16.005368 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:22:16 crc kubenswrapper[4881]: E1211 09:22:16.005935 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:22:17 crc kubenswrapper[4881]: I1211 09:22:17.167529 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4d9h9" Dec 11 09:22:17 crc kubenswrapper[4881]: I1211 09:22:17.215551 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4d9h9" Dec 11 09:22:20 crc kubenswrapper[4881]: I1211 09:22:20.768666 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4d9h9"] Dec 11 09:22:20 crc kubenswrapper[4881]: I1211 09:22:20.769412 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4d9h9" podUID="a30d0370-88ca-48de-b318-91089702cebe" containerName="registry-server" containerID="cri-o://45a3629f5be4d2444e1a24a15771dcb37f4049dd3c3128a3455a67cefb2740cc" gracePeriod=2 Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.468363 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4d9h9" Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.667667 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a30d0370-88ca-48de-b318-91089702cebe-utilities\") pod \"a30d0370-88ca-48de-b318-91089702cebe\" (UID: \"a30d0370-88ca-48de-b318-91089702cebe\") " Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.667753 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x9ww7\" (UniqueName: \"kubernetes.io/projected/a30d0370-88ca-48de-b318-91089702cebe-kube-api-access-x9ww7\") pod \"a30d0370-88ca-48de-b318-91089702cebe\" (UID: \"a30d0370-88ca-48de-b318-91089702cebe\") " Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.667860 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a30d0370-88ca-48de-b318-91089702cebe-catalog-content\") pod \"a30d0370-88ca-48de-b318-91089702cebe\" (UID: \"a30d0370-88ca-48de-b318-91089702cebe\") " Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.671577 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a30d0370-88ca-48de-b318-91089702cebe-utilities" (OuterVolumeSpecName: "utilities") pod "a30d0370-88ca-48de-b318-91089702cebe" (UID: "a30d0370-88ca-48de-b318-91089702cebe"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.711037 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a30d0370-88ca-48de-b318-91089702cebe-kube-api-access-x9ww7" (OuterVolumeSpecName: "kube-api-access-x9ww7") pod "a30d0370-88ca-48de-b318-91089702cebe" (UID: "a30d0370-88ca-48de-b318-91089702cebe"). InnerVolumeSpecName "kube-api-access-x9ww7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.759170 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a30d0370-88ca-48de-b318-91089702cebe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a30d0370-88ca-48de-b318-91089702cebe" (UID: "a30d0370-88ca-48de-b318-91089702cebe"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.771409 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a30d0370-88ca-48de-b318-91089702cebe-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.771449 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a30d0370-88ca-48de-b318-91089702cebe-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.771485 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x9ww7\" (UniqueName: \"kubernetes.io/projected/a30d0370-88ca-48de-b318-91089702cebe-kube-api-access-x9ww7\") on node \"crc\" DevicePath \"\"" Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.855758 4881 generic.go:334] "Generic (PLEG): container finished" podID="a30d0370-88ca-48de-b318-91089702cebe" containerID="45a3629f5be4d2444e1a24a15771dcb37f4049dd3c3128a3455a67cefb2740cc" exitCode=0 Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.855812 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4d9h9" event={"ID":"a30d0370-88ca-48de-b318-91089702cebe","Type":"ContainerDied","Data":"45a3629f5be4d2444e1a24a15771dcb37f4049dd3c3128a3455a67cefb2740cc"} Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.855852 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4d9h9" event={"ID":"a30d0370-88ca-48de-b318-91089702cebe","Type":"ContainerDied","Data":"eb214d425595539b12df6bc880ef223ae448a60f207608a9abe2f1e5cef17720"} Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.855859 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4d9h9" Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.855877 4881 scope.go:117] "RemoveContainer" containerID="45a3629f5be4d2444e1a24a15771dcb37f4049dd3c3128a3455a67cefb2740cc" Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.888024 4881 scope.go:117] "RemoveContainer" containerID="37a5ac34a650d110bb82250c8ccc22e630412029a3d4b13e76a7c8d8d2b3b44a" Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.895862 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4d9h9"] Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.907316 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4d9h9"] Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.913958 4881 scope.go:117] "RemoveContainer" containerID="a59cf02efd1c45b96922e470245783bd548781a241844be2648d0a7efe074748" Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.979401 4881 scope.go:117] "RemoveContainer" containerID="45a3629f5be4d2444e1a24a15771dcb37f4049dd3c3128a3455a67cefb2740cc" Dec 11 09:22:21 crc kubenswrapper[4881]: E1211 09:22:21.979894 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45a3629f5be4d2444e1a24a15771dcb37f4049dd3c3128a3455a67cefb2740cc\": container with ID starting with 45a3629f5be4d2444e1a24a15771dcb37f4049dd3c3128a3455a67cefb2740cc not found: ID does not exist" containerID="45a3629f5be4d2444e1a24a15771dcb37f4049dd3c3128a3455a67cefb2740cc" Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.979948 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45a3629f5be4d2444e1a24a15771dcb37f4049dd3c3128a3455a67cefb2740cc"} err="failed to get container status \"45a3629f5be4d2444e1a24a15771dcb37f4049dd3c3128a3455a67cefb2740cc\": rpc error: code = NotFound desc = could not find container \"45a3629f5be4d2444e1a24a15771dcb37f4049dd3c3128a3455a67cefb2740cc\": container with ID starting with 45a3629f5be4d2444e1a24a15771dcb37f4049dd3c3128a3455a67cefb2740cc not found: ID does not exist" Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.979982 4881 scope.go:117] "RemoveContainer" containerID="37a5ac34a650d110bb82250c8ccc22e630412029a3d4b13e76a7c8d8d2b3b44a" Dec 11 09:22:21 crc kubenswrapper[4881]: E1211 09:22:21.980953 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37a5ac34a650d110bb82250c8ccc22e630412029a3d4b13e76a7c8d8d2b3b44a\": container with ID starting with 37a5ac34a650d110bb82250c8ccc22e630412029a3d4b13e76a7c8d8d2b3b44a not found: ID does not exist" containerID="37a5ac34a650d110bb82250c8ccc22e630412029a3d4b13e76a7c8d8d2b3b44a" Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.980984 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37a5ac34a650d110bb82250c8ccc22e630412029a3d4b13e76a7c8d8d2b3b44a"} err="failed to get container status \"37a5ac34a650d110bb82250c8ccc22e630412029a3d4b13e76a7c8d8d2b3b44a\": rpc error: code = NotFound desc = could not find container \"37a5ac34a650d110bb82250c8ccc22e630412029a3d4b13e76a7c8d8d2b3b44a\": container with ID starting with 37a5ac34a650d110bb82250c8ccc22e630412029a3d4b13e76a7c8d8d2b3b44a not found: ID does not exist" Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.981002 4881 scope.go:117] "RemoveContainer" 
containerID="a59cf02efd1c45b96922e470245783bd548781a241844be2648d0a7efe074748" Dec 11 09:22:21 crc kubenswrapper[4881]: E1211 09:22:21.981261 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a59cf02efd1c45b96922e470245783bd548781a241844be2648d0a7efe074748\": container with ID starting with a59cf02efd1c45b96922e470245783bd548781a241844be2648d0a7efe074748 not found: ID does not exist" containerID="a59cf02efd1c45b96922e470245783bd548781a241844be2648d0a7efe074748" Dec 11 09:22:21 crc kubenswrapper[4881]: I1211 09:22:21.981281 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a59cf02efd1c45b96922e470245783bd548781a241844be2648d0a7efe074748"} err="failed to get container status \"a59cf02efd1c45b96922e470245783bd548781a241844be2648d0a7efe074748\": rpc error: code = NotFound desc = could not find container \"a59cf02efd1c45b96922e470245783bd548781a241844be2648d0a7efe074748\": container with ID starting with a59cf02efd1c45b96922e470245783bd548781a241844be2648d0a7efe074748 not found: ID does not exist" Dec 11 09:22:23 crc kubenswrapper[4881]: I1211 09:22:23.021092 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a30d0370-88ca-48de-b318-91089702cebe" path="/var/lib/kubelet/pods/a30d0370-88ca-48de-b318-91089702cebe/volumes" Dec 11 09:22:23 crc kubenswrapper[4881]: I1211 09:22:23.785946 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-b926c" Dec 11 09:22:23 crc kubenswrapper[4881]: I1211 09:22:23.844305 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-b926c" Dec 11 09:22:26 crc kubenswrapper[4881]: I1211 09:22:26.367623 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b926c"] Dec 11 09:22:26 crc kubenswrapper[4881]: I1211 09:22:26.368458 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-b926c" podUID="06b368e6-9e7a-4398-9f8f-d7c14bb29e10" containerName="registry-server" containerID="cri-o://37e8b359c251f1417be07aadce2062287e1d0225a892bf5809428b0cd9cbe386" gracePeriod=2 Dec 11 09:22:26 crc kubenswrapper[4881]: I1211 09:22:26.946899 4881 generic.go:334] "Generic (PLEG): container finished" podID="06b368e6-9e7a-4398-9f8f-d7c14bb29e10" containerID="37e8b359c251f1417be07aadce2062287e1d0225a892bf5809428b0cd9cbe386" exitCode=0 Dec 11 09:22:26 crc kubenswrapper[4881]: I1211 09:22:26.947239 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b926c" event={"ID":"06b368e6-9e7a-4398-9f8f-d7c14bb29e10","Type":"ContainerDied","Data":"37e8b359c251f1417be07aadce2062287e1d0225a892bf5809428b0cd9cbe386"} Dec 11 09:22:27 crc kubenswrapper[4881]: I1211 09:22:27.517834 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b926c" Dec 11 09:22:27 crc kubenswrapper[4881]: I1211 09:22:27.669071 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-catalog-content\") pod \"06b368e6-9e7a-4398-9f8f-d7c14bb29e10\" (UID: \"06b368e6-9e7a-4398-9f8f-d7c14bb29e10\") " Dec 11 09:22:27 crc kubenswrapper[4881]: I1211 09:22:27.669274 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ksnh\" (UniqueName: \"kubernetes.io/projected/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-kube-api-access-5ksnh\") pod \"06b368e6-9e7a-4398-9f8f-d7c14bb29e10\" (UID: \"06b368e6-9e7a-4398-9f8f-d7c14bb29e10\") " Dec 11 09:22:27 crc kubenswrapper[4881]: I1211 09:22:27.669360 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-utilities\") pod \"06b368e6-9e7a-4398-9f8f-d7c14bb29e10\" (UID: \"06b368e6-9e7a-4398-9f8f-d7c14bb29e10\") " Dec 11 09:22:27 crc kubenswrapper[4881]: I1211 09:22:27.670748 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-utilities" (OuterVolumeSpecName: "utilities") pod "06b368e6-9e7a-4398-9f8f-d7c14bb29e10" (UID: "06b368e6-9e7a-4398-9f8f-d7c14bb29e10"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:22:27 crc kubenswrapper[4881]: I1211 09:22:27.671084 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:22:27 crc kubenswrapper[4881]: I1211 09:22:27.675354 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-kube-api-access-5ksnh" (OuterVolumeSpecName: "kube-api-access-5ksnh") pod "06b368e6-9e7a-4398-9f8f-d7c14bb29e10" (UID: "06b368e6-9e7a-4398-9f8f-d7c14bb29e10"). InnerVolumeSpecName "kube-api-access-5ksnh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:22:27 crc kubenswrapper[4881]: I1211 09:22:27.772646 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ksnh\" (UniqueName: \"kubernetes.io/projected/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-kube-api-access-5ksnh\") on node \"crc\" DevicePath \"\"" Dec 11 09:22:27 crc kubenswrapper[4881]: I1211 09:22:27.799771 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "06b368e6-9e7a-4398-9f8f-d7c14bb29e10" (UID: "06b368e6-9e7a-4398-9f8f-d7c14bb29e10"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:22:27 crc kubenswrapper[4881]: I1211 09:22:27.875518 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06b368e6-9e7a-4398-9f8f-d7c14bb29e10-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:22:27 crc kubenswrapper[4881]: I1211 09:22:27.959983 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b926c" event={"ID":"06b368e6-9e7a-4398-9f8f-d7c14bb29e10","Type":"ContainerDied","Data":"82aa9e99ea4bf024d92cd318a3f76997c802a04156093582ef2a3ab709098710"} Dec 11 09:22:27 crc kubenswrapper[4881]: I1211 09:22:27.960056 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b926c" Dec 11 09:22:27 crc kubenswrapper[4881]: I1211 09:22:27.960067 4881 scope.go:117] "RemoveContainer" containerID="37e8b359c251f1417be07aadce2062287e1d0225a892bf5809428b0cd9cbe386" Dec 11 09:22:27 crc kubenswrapper[4881]: I1211 09:22:27.992002 4881 scope.go:117] "RemoveContainer" containerID="e4ff090280aa3a23fc13d28d258cb06357ecffa88181a1f2c0d7194e3fa74822" Dec 11 09:22:28 crc kubenswrapper[4881]: I1211 09:22:28.012867 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b926c"] Dec 11 09:22:28 crc kubenswrapper[4881]: I1211 09:22:28.025870 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-b926c"] Dec 11 09:22:28 crc kubenswrapper[4881]: I1211 09:22:28.035540 4881 scope.go:117] "RemoveContainer" containerID="1c118a343d6b0b2fffbe69a0c38e0d658d2444724ec6c85569d3b42f2e63c858" Dec 11 09:22:29 crc kubenswrapper[4881]: I1211 09:22:29.021709 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06b368e6-9e7a-4398-9f8f-d7c14bb29e10" path="/var/lib/kubelet/pods/06b368e6-9e7a-4398-9f8f-d7c14bb29e10/volumes" Dec 11 09:22:31 crc kubenswrapper[4881]: I1211 09:22:31.005803 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:22:31 crc kubenswrapper[4881]: E1211 09:22:31.006416 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:22:46 crc kubenswrapper[4881]: I1211 09:22:46.005981 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:22:46 crc kubenswrapper[4881]: E1211 09:22:46.007392 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:22:58 crc kubenswrapper[4881]: I1211 09:22:58.005732 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:22:58 crc kubenswrapper[4881]: E1211 09:22:58.006911 
4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:23:11 crc kubenswrapper[4881]: I1211 09:23:11.006911 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:23:11 crc kubenswrapper[4881]: E1211 09:23:11.009291 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:23:25 crc kubenswrapper[4881]: I1211 09:23:25.006575 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:23:25 crc kubenswrapper[4881]: E1211 09:23:25.008011 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:23:38 crc kubenswrapper[4881]: I1211 09:23:38.006163 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:23:38 crc kubenswrapper[4881]: I1211 09:23:38.858038 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"3c919a09176628ef675a5ea64db5491ef66443c447c2effe71c706a3382e44e2"} Dec 11 09:24:01 crc kubenswrapper[4881]: I1211 09:24:01.985504 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qq5dg"] Dec 11 09:24:01 crc kubenswrapper[4881]: E1211 09:24:01.986513 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06b368e6-9e7a-4398-9f8f-d7c14bb29e10" containerName="extract-content" Dec 11 09:24:01 crc kubenswrapper[4881]: I1211 09:24:01.986530 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="06b368e6-9e7a-4398-9f8f-d7c14bb29e10" containerName="extract-content" Dec 11 09:24:01 crc kubenswrapper[4881]: E1211 09:24:01.986562 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06b368e6-9e7a-4398-9f8f-d7c14bb29e10" containerName="extract-utilities" Dec 11 09:24:01 crc kubenswrapper[4881]: I1211 09:24:01.986568 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="06b368e6-9e7a-4398-9f8f-d7c14bb29e10" containerName="extract-utilities" Dec 11 09:24:01 crc kubenswrapper[4881]: E1211 09:24:01.986589 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a30d0370-88ca-48de-b318-91089702cebe" containerName="extract-utilities" Dec 11 09:24:01 crc kubenswrapper[4881]: I1211 09:24:01.986595 4881 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="a30d0370-88ca-48de-b318-91089702cebe" containerName="extract-utilities" Dec 11 09:24:01 crc kubenswrapper[4881]: E1211 09:24:01.986606 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a30d0370-88ca-48de-b318-91089702cebe" containerName="extract-content" Dec 11 09:24:01 crc kubenswrapper[4881]: I1211 09:24:01.986612 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="a30d0370-88ca-48de-b318-91089702cebe" containerName="extract-content" Dec 11 09:24:01 crc kubenswrapper[4881]: E1211 09:24:01.986631 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06b368e6-9e7a-4398-9f8f-d7c14bb29e10" containerName="registry-server" Dec 11 09:24:01 crc kubenswrapper[4881]: I1211 09:24:01.986636 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="06b368e6-9e7a-4398-9f8f-d7c14bb29e10" containerName="registry-server" Dec 11 09:24:01 crc kubenswrapper[4881]: E1211 09:24:01.986647 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a30d0370-88ca-48de-b318-91089702cebe" containerName="registry-server" Dec 11 09:24:01 crc kubenswrapper[4881]: I1211 09:24:01.986652 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="a30d0370-88ca-48de-b318-91089702cebe" containerName="registry-server" Dec 11 09:24:01 crc kubenswrapper[4881]: I1211 09:24:01.986888 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="06b368e6-9e7a-4398-9f8f-d7c14bb29e10" containerName="registry-server" Dec 11 09:24:01 crc kubenswrapper[4881]: I1211 09:24:01.986925 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="a30d0370-88ca-48de-b318-91089702cebe" containerName="registry-server" Dec 11 09:24:01 crc kubenswrapper[4881]: I1211 09:24:01.988731 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qq5dg" Dec 11 09:24:02 crc kubenswrapper[4881]: I1211 09:24:02.002022 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qq5dg"] Dec 11 09:24:02 crc kubenswrapper[4881]: I1211 09:24:02.145743 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swz46\" (UniqueName: \"kubernetes.io/projected/223793f9-5fcc-4495-b1c4-537248807693-kube-api-access-swz46\") pod \"redhat-marketplace-qq5dg\" (UID: \"223793f9-5fcc-4495-b1c4-537248807693\") " pod="openshift-marketplace/redhat-marketplace-qq5dg" Dec 11 09:24:02 crc kubenswrapper[4881]: I1211 09:24:02.145899 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/223793f9-5fcc-4495-b1c4-537248807693-catalog-content\") pod \"redhat-marketplace-qq5dg\" (UID: \"223793f9-5fcc-4495-b1c4-537248807693\") " pod="openshift-marketplace/redhat-marketplace-qq5dg" Dec 11 09:24:02 crc kubenswrapper[4881]: I1211 09:24:02.146471 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/223793f9-5fcc-4495-b1c4-537248807693-utilities\") pod \"redhat-marketplace-qq5dg\" (UID: \"223793f9-5fcc-4495-b1c4-537248807693\") " pod="openshift-marketplace/redhat-marketplace-qq5dg" Dec 11 09:24:02 crc kubenswrapper[4881]: I1211 09:24:02.259188 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swz46\" (UniqueName: \"kubernetes.io/projected/223793f9-5fcc-4495-b1c4-537248807693-kube-api-access-swz46\") pod \"redhat-marketplace-qq5dg\" (UID: \"223793f9-5fcc-4495-b1c4-537248807693\") " pod="openshift-marketplace/redhat-marketplace-qq5dg" Dec 11 09:24:02 crc kubenswrapper[4881]: I1211 09:24:02.259280 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/223793f9-5fcc-4495-b1c4-537248807693-catalog-content\") pod \"redhat-marketplace-qq5dg\" (UID: \"223793f9-5fcc-4495-b1c4-537248807693\") " pod="openshift-marketplace/redhat-marketplace-qq5dg" Dec 11 09:24:02 crc kubenswrapper[4881]: I1211 09:24:02.259478 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/223793f9-5fcc-4495-b1c4-537248807693-utilities\") pod \"redhat-marketplace-qq5dg\" (UID: \"223793f9-5fcc-4495-b1c4-537248807693\") " pod="openshift-marketplace/redhat-marketplace-qq5dg" Dec 11 09:24:02 crc kubenswrapper[4881]: I1211 09:24:02.260031 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/223793f9-5fcc-4495-b1c4-537248807693-utilities\") pod \"redhat-marketplace-qq5dg\" (UID: \"223793f9-5fcc-4495-b1c4-537248807693\") " pod="openshift-marketplace/redhat-marketplace-qq5dg" Dec 11 09:24:02 crc kubenswrapper[4881]: I1211 09:24:02.260821 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/223793f9-5fcc-4495-b1c4-537248807693-catalog-content\") pod \"redhat-marketplace-qq5dg\" (UID: \"223793f9-5fcc-4495-b1c4-537248807693\") " pod="openshift-marketplace/redhat-marketplace-qq5dg" Dec 11 09:24:02 crc kubenswrapper[4881]: I1211 09:24:02.287974 4881 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-swz46\" (UniqueName: \"kubernetes.io/projected/223793f9-5fcc-4495-b1c4-537248807693-kube-api-access-swz46\") pod \"redhat-marketplace-qq5dg\" (UID: \"223793f9-5fcc-4495-b1c4-537248807693\") " pod="openshift-marketplace/redhat-marketplace-qq5dg" Dec 11 09:24:02 crc kubenswrapper[4881]: I1211 09:24:02.313035 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qq5dg" Dec 11 09:24:02 crc kubenswrapper[4881]: I1211 09:24:02.855146 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qq5dg"] Dec 11 09:24:03 crc kubenswrapper[4881]: I1211 09:24:03.208091 4881 generic.go:334] "Generic (PLEG): container finished" podID="223793f9-5fcc-4495-b1c4-537248807693" containerID="e9bf3fb0a875b2bcee99295c3d5c5f8639d32038d6c6b0f2e2925d6194536317" exitCode=0 Dec 11 09:24:03 crc kubenswrapper[4881]: I1211 09:24:03.208163 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qq5dg" event={"ID":"223793f9-5fcc-4495-b1c4-537248807693","Type":"ContainerDied","Data":"e9bf3fb0a875b2bcee99295c3d5c5f8639d32038d6c6b0f2e2925d6194536317"} Dec 11 09:24:03 crc kubenswrapper[4881]: I1211 09:24:03.208199 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qq5dg" event={"ID":"223793f9-5fcc-4495-b1c4-537248807693","Type":"ContainerStarted","Data":"2435d2fc12aa110302c900bd06e087b0b82c73baa0461f83ed59d1092f0db216"} Dec 11 09:24:05 crc kubenswrapper[4881]: I1211 09:24:05.232663 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qq5dg" event={"ID":"223793f9-5fcc-4495-b1c4-537248807693","Type":"ContainerStarted","Data":"ceda788f9067b42511262da3da4642eadec5d578b721cc275389be88aada5351"} Dec 11 09:24:06 crc kubenswrapper[4881]: I1211 09:24:06.252801 4881 generic.go:334] "Generic (PLEG): container finished" podID="223793f9-5fcc-4495-b1c4-537248807693" containerID="ceda788f9067b42511262da3da4642eadec5d578b721cc275389be88aada5351" exitCode=0 Dec 11 09:24:06 crc kubenswrapper[4881]: I1211 09:24:06.252911 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qq5dg" event={"ID":"223793f9-5fcc-4495-b1c4-537248807693","Type":"ContainerDied","Data":"ceda788f9067b42511262da3da4642eadec5d578b721cc275389be88aada5351"} Dec 11 09:24:07 crc kubenswrapper[4881]: I1211 09:24:07.279043 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qq5dg" event={"ID":"223793f9-5fcc-4495-b1c4-537248807693","Type":"ContainerStarted","Data":"fd5f9eac0f2f020b0fc555f69a65c2e11be78da9f564f36f055b0ad9644ce0a6"} Dec 11 09:24:07 crc kubenswrapper[4881]: I1211 09:24:07.322393 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qq5dg" podStartSLOduration=2.678882582 podStartE2EDuration="6.322370341s" podCreationTimestamp="2025-12-11 09:24:01 +0000 UTC" firstStartedPulling="2025-12-11 09:24:03.211129301 +0000 UTC m=+4091.588498008" lastFinishedPulling="2025-12-11 09:24:06.85461705 +0000 UTC m=+4095.231985767" observedRunningTime="2025-12-11 09:24:07.311229975 +0000 UTC m=+4095.688598672" watchObservedRunningTime="2025-12-11 09:24:07.322370341 +0000 UTC m=+4095.699739038" Dec 11 09:24:12 crc kubenswrapper[4881]: I1211 09:24:12.313550 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-qq5dg" Dec 11 09:24:12 crc kubenswrapper[4881]: I1211 09:24:12.314112 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qq5dg" Dec 11 09:24:12 crc kubenswrapper[4881]: I1211 09:24:12.365190 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qq5dg" Dec 11 09:24:12 crc kubenswrapper[4881]: I1211 09:24:12.414706 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qq5dg" Dec 11 09:24:12 crc kubenswrapper[4881]: I1211 09:24:12.610144 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qq5dg"] Dec 11 09:24:14 crc kubenswrapper[4881]: I1211 09:24:14.361676 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qq5dg" podUID="223793f9-5fcc-4495-b1c4-537248807693" containerName="registry-server" containerID="cri-o://fd5f9eac0f2f020b0fc555f69a65c2e11be78da9f564f36f055b0ad9644ce0a6" gracePeriod=2 Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.020022 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qq5dg" Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.173747 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/223793f9-5fcc-4495-b1c4-537248807693-catalog-content\") pod \"223793f9-5fcc-4495-b1c4-537248807693\" (UID: \"223793f9-5fcc-4495-b1c4-537248807693\") " Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.174058 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-swz46\" (UniqueName: \"kubernetes.io/projected/223793f9-5fcc-4495-b1c4-537248807693-kube-api-access-swz46\") pod \"223793f9-5fcc-4495-b1c4-537248807693\" (UID: \"223793f9-5fcc-4495-b1c4-537248807693\") " Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.174291 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/223793f9-5fcc-4495-b1c4-537248807693-utilities\") pod \"223793f9-5fcc-4495-b1c4-537248807693\" (UID: \"223793f9-5fcc-4495-b1c4-537248807693\") " Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.175414 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/223793f9-5fcc-4495-b1c4-537248807693-utilities" (OuterVolumeSpecName: "utilities") pod "223793f9-5fcc-4495-b1c4-537248807693" (UID: "223793f9-5fcc-4495-b1c4-537248807693"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.175630 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/223793f9-5fcc-4495-b1c4-537248807693-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.182295 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/223793f9-5fcc-4495-b1c4-537248807693-kube-api-access-swz46" (OuterVolumeSpecName: "kube-api-access-swz46") pod "223793f9-5fcc-4495-b1c4-537248807693" (UID: "223793f9-5fcc-4495-b1c4-537248807693"). InnerVolumeSpecName "kube-api-access-swz46". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.199939 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/223793f9-5fcc-4495-b1c4-537248807693-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "223793f9-5fcc-4495-b1c4-537248807693" (UID: "223793f9-5fcc-4495-b1c4-537248807693"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.277841 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/223793f9-5fcc-4495-b1c4-537248807693-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.277931 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-swz46\" (UniqueName: \"kubernetes.io/projected/223793f9-5fcc-4495-b1c4-537248807693-kube-api-access-swz46\") on node \"crc\" DevicePath \"\"" Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.374642 4881 generic.go:334] "Generic (PLEG): container finished" podID="223793f9-5fcc-4495-b1c4-537248807693" containerID="fd5f9eac0f2f020b0fc555f69a65c2e11be78da9f564f36f055b0ad9644ce0a6" exitCode=0 Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.374771 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qq5dg" Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.374801 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qq5dg" event={"ID":"223793f9-5fcc-4495-b1c4-537248807693","Type":"ContainerDied","Data":"fd5f9eac0f2f020b0fc555f69a65c2e11be78da9f564f36f055b0ad9644ce0a6"} Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.375767 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qq5dg" event={"ID":"223793f9-5fcc-4495-b1c4-537248807693","Type":"ContainerDied","Data":"2435d2fc12aa110302c900bd06e087b0b82c73baa0461f83ed59d1092f0db216"} Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.375807 4881 scope.go:117] "RemoveContainer" containerID="fd5f9eac0f2f020b0fc555f69a65c2e11be78da9f564f36f055b0ad9644ce0a6" Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.410737 4881 scope.go:117] "RemoveContainer" containerID="ceda788f9067b42511262da3da4642eadec5d578b721cc275389be88aada5351" Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.420546 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qq5dg"] Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.432217 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qq5dg"] Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.454641 4881 scope.go:117] "RemoveContainer" containerID="e9bf3fb0a875b2bcee99295c3d5c5f8639d32038d6c6b0f2e2925d6194536317" Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.521249 4881 scope.go:117] "RemoveContainer" containerID="fd5f9eac0f2f020b0fc555f69a65c2e11be78da9f564f36f055b0ad9644ce0a6" Dec 11 09:24:15 crc kubenswrapper[4881]: E1211 09:24:15.521800 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd5f9eac0f2f020b0fc555f69a65c2e11be78da9f564f36f055b0ad9644ce0a6\": container with ID starting with 
fd5f9eac0f2f020b0fc555f69a65c2e11be78da9f564f36f055b0ad9644ce0a6 not found: ID does not exist" containerID="fd5f9eac0f2f020b0fc555f69a65c2e11be78da9f564f36f055b0ad9644ce0a6" Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.521862 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd5f9eac0f2f020b0fc555f69a65c2e11be78da9f564f36f055b0ad9644ce0a6"} err="failed to get container status \"fd5f9eac0f2f020b0fc555f69a65c2e11be78da9f564f36f055b0ad9644ce0a6\": rpc error: code = NotFound desc = could not find container \"fd5f9eac0f2f020b0fc555f69a65c2e11be78da9f564f36f055b0ad9644ce0a6\": container with ID starting with fd5f9eac0f2f020b0fc555f69a65c2e11be78da9f564f36f055b0ad9644ce0a6 not found: ID does not exist" Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.521915 4881 scope.go:117] "RemoveContainer" containerID="ceda788f9067b42511262da3da4642eadec5d578b721cc275389be88aada5351" Dec 11 09:24:15 crc kubenswrapper[4881]: E1211 09:24:15.522147 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ceda788f9067b42511262da3da4642eadec5d578b721cc275389be88aada5351\": container with ID starting with ceda788f9067b42511262da3da4642eadec5d578b721cc275389be88aada5351 not found: ID does not exist" containerID="ceda788f9067b42511262da3da4642eadec5d578b721cc275389be88aada5351" Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.522178 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ceda788f9067b42511262da3da4642eadec5d578b721cc275389be88aada5351"} err="failed to get container status \"ceda788f9067b42511262da3da4642eadec5d578b721cc275389be88aada5351\": rpc error: code = NotFound desc = could not find container \"ceda788f9067b42511262da3da4642eadec5d578b721cc275389be88aada5351\": container with ID starting with ceda788f9067b42511262da3da4642eadec5d578b721cc275389be88aada5351 not found: ID does not exist" Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.522197 4881 scope.go:117] "RemoveContainer" containerID="e9bf3fb0a875b2bcee99295c3d5c5f8639d32038d6c6b0f2e2925d6194536317" Dec 11 09:24:15 crc kubenswrapper[4881]: E1211 09:24:15.522465 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9bf3fb0a875b2bcee99295c3d5c5f8639d32038d6c6b0f2e2925d6194536317\": container with ID starting with e9bf3fb0a875b2bcee99295c3d5c5f8639d32038d6c6b0f2e2925d6194536317 not found: ID does not exist" containerID="e9bf3fb0a875b2bcee99295c3d5c5f8639d32038d6c6b0f2e2925d6194536317" Dec 11 09:24:15 crc kubenswrapper[4881]: I1211 09:24:15.522506 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9bf3fb0a875b2bcee99295c3d5c5f8639d32038d6c6b0f2e2925d6194536317"} err="failed to get container status \"e9bf3fb0a875b2bcee99295c3d5c5f8639d32038d6c6b0f2e2925d6194536317\": rpc error: code = NotFound desc = could not find container \"e9bf3fb0a875b2bcee99295c3d5c5f8639d32038d6c6b0f2e2925d6194536317\": container with ID starting with e9bf3fb0a875b2bcee99295c3d5c5f8639d32038d6c6b0f2e2925d6194536317 not found: ID does not exist" Dec 11 09:24:17 crc kubenswrapper[4881]: I1211 09:24:17.023494 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="223793f9-5fcc-4495-b1c4-537248807693" path="/var/lib/kubelet/pods/223793f9-5fcc-4495-b1c4-537248807693/volumes" Dec 11 09:24:59 crc kubenswrapper[4881]: I1211 09:24:59.260311 
4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hdm4r"] Dec 11 09:24:59 crc kubenswrapper[4881]: E1211 09:24:59.261304 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="223793f9-5fcc-4495-b1c4-537248807693" containerName="extract-content" Dec 11 09:24:59 crc kubenswrapper[4881]: I1211 09:24:59.261323 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="223793f9-5fcc-4495-b1c4-537248807693" containerName="extract-content" Dec 11 09:24:59 crc kubenswrapper[4881]: E1211 09:24:59.261372 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="223793f9-5fcc-4495-b1c4-537248807693" containerName="extract-utilities" Dec 11 09:24:59 crc kubenswrapper[4881]: I1211 09:24:59.261378 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="223793f9-5fcc-4495-b1c4-537248807693" containerName="extract-utilities" Dec 11 09:24:59 crc kubenswrapper[4881]: E1211 09:24:59.261415 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="223793f9-5fcc-4495-b1c4-537248807693" containerName="registry-server" Dec 11 09:24:59 crc kubenswrapper[4881]: I1211 09:24:59.261421 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="223793f9-5fcc-4495-b1c4-537248807693" containerName="registry-server" Dec 11 09:24:59 crc kubenswrapper[4881]: I1211 09:24:59.261641 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="223793f9-5fcc-4495-b1c4-537248807693" containerName="registry-server" Dec 11 09:24:59 crc kubenswrapper[4881]: I1211 09:24:59.263359 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hdm4r" Dec 11 09:24:59 crc kubenswrapper[4881]: I1211 09:24:59.282661 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hdm4r"] Dec 11 09:24:59 crc kubenswrapper[4881]: I1211 09:24:59.344045 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgjq9\" (UniqueName: \"kubernetes.io/projected/b081ce59-5487-45ec-9302-6d63a3c44fae-kube-api-access-lgjq9\") pod \"certified-operators-hdm4r\" (UID: \"b081ce59-5487-45ec-9302-6d63a3c44fae\") " pod="openshift-marketplace/certified-operators-hdm4r" Dec 11 09:24:59 crc kubenswrapper[4881]: I1211 09:24:59.344104 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b081ce59-5487-45ec-9302-6d63a3c44fae-utilities\") pod \"certified-operators-hdm4r\" (UID: \"b081ce59-5487-45ec-9302-6d63a3c44fae\") " pod="openshift-marketplace/certified-operators-hdm4r" Dec 11 09:24:59 crc kubenswrapper[4881]: I1211 09:24:59.344645 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b081ce59-5487-45ec-9302-6d63a3c44fae-catalog-content\") pod \"certified-operators-hdm4r\" (UID: \"b081ce59-5487-45ec-9302-6d63a3c44fae\") " pod="openshift-marketplace/certified-operators-hdm4r" Dec 11 09:24:59 crc kubenswrapper[4881]: I1211 09:24:59.446887 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b081ce59-5487-45ec-9302-6d63a3c44fae-catalog-content\") pod \"certified-operators-hdm4r\" (UID: \"b081ce59-5487-45ec-9302-6d63a3c44fae\") " pod="openshift-marketplace/certified-operators-hdm4r" Dec 11 09:24:59 crc 
kubenswrapper[4881]: I1211 09:24:59.447348 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgjq9\" (UniqueName: \"kubernetes.io/projected/b081ce59-5487-45ec-9302-6d63a3c44fae-kube-api-access-lgjq9\") pod \"certified-operators-hdm4r\" (UID: \"b081ce59-5487-45ec-9302-6d63a3c44fae\") " pod="openshift-marketplace/certified-operators-hdm4r" Dec 11 09:24:59 crc kubenswrapper[4881]: I1211 09:24:59.447377 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b081ce59-5487-45ec-9302-6d63a3c44fae-utilities\") pod \"certified-operators-hdm4r\" (UID: \"b081ce59-5487-45ec-9302-6d63a3c44fae\") " pod="openshift-marketplace/certified-operators-hdm4r" Dec 11 09:24:59 crc kubenswrapper[4881]: I1211 09:24:59.447594 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b081ce59-5487-45ec-9302-6d63a3c44fae-catalog-content\") pod \"certified-operators-hdm4r\" (UID: \"b081ce59-5487-45ec-9302-6d63a3c44fae\") " pod="openshift-marketplace/certified-operators-hdm4r" Dec 11 09:24:59 crc kubenswrapper[4881]: I1211 09:24:59.447891 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b081ce59-5487-45ec-9302-6d63a3c44fae-utilities\") pod \"certified-operators-hdm4r\" (UID: \"b081ce59-5487-45ec-9302-6d63a3c44fae\") " pod="openshift-marketplace/certified-operators-hdm4r" Dec 11 09:24:59 crc kubenswrapper[4881]: I1211 09:24:59.906769 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgjq9\" (UniqueName: \"kubernetes.io/projected/b081ce59-5487-45ec-9302-6d63a3c44fae-kube-api-access-lgjq9\") pod \"certified-operators-hdm4r\" (UID: \"b081ce59-5487-45ec-9302-6d63a3c44fae\") " pod="openshift-marketplace/certified-operators-hdm4r" Dec 11 09:25:00 crc kubenswrapper[4881]: I1211 09:25:00.193153 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hdm4r" Dec 11 09:25:00 crc kubenswrapper[4881]: I1211 09:25:00.690872 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hdm4r"] Dec 11 09:25:01 crc kubenswrapper[4881]: I1211 09:25:01.021943 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hdm4r" event={"ID":"b081ce59-5487-45ec-9302-6d63a3c44fae","Type":"ContainerStarted","Data":"ebbc1ed51c4195ecf9b049881f24270026379c05a388947beddd2346053a5f79"} Dec 11 09:25:02 crc kubenswrapper[4881]: I1211 09:25:02.026414 4881 generic.go:334] "Generic (PLEG): container finished" podID="b081ce59-5487-45ec-9302-6d63a3c44fae" containerID="cae206f000038022dddda837073873409446efc09c1d8946d0e46367f434f531" exitCode=0 Dec 11 09:25:02 crc kubenswrapper[4881]: I1211 09:25:02.026862 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hdm4r" event={"ID":"b081ce59-5487-45ec-9302-6d63a3c44fae","Type":"ContainerDied","Data":"cae206f000038022dddda837073873409446efc09c1d8946d0e46367f434f531"} Dec 11 09:25:04 crc kubenswrapper[4881]: I1211 09:25:04.105537 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hdm4r" event={"ID":"b081ce59-5487-45ec-9302-6d63a3c44fae","Type":"ContainerStarted","Data":"172693520e93d618e271e2d540a8357ce2ce4321b4112350bf89f479b2f7ebe8"} Dec 11 09:25:05 crc kubenswrapper[4881]: I1211 09:25:05.120520 4881 generic.go:334] "Generic (PLEG): container finished" podID="b081ce59-5487-45ec-9302-6d63a3c44fae" containerID="172693520e93d618e271e2d540a8357ce2ce4321b4112350bf89f479b2f7ebe8" exitCode=0 Dec 11 09:25:05 crc kubenswrapper[4881]: I1211 09:25:05.120611 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hdm4r" event={"ID":"b081ce59-5487-45ec-9302-6d63a3c44fae","Type":"ContainerDied","Data":"172693520e93d618e271e2d540a8357ce2ce4321b4112350bf89f479b2f7ebe8"} Dec 11 09:25:08 crc kubenswrapper[4881]: I1211 09:25:08.161170 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hdm4r" event={"ID":"b081ce59-5487-45ec-9302-6d63a3c44fae","Type":"ContainerStarted","Data":"2389e8b8adb88dc8c82819a2a013982123e8000ff8e24b596f16de5535dc4c9b"} Dec 11 09:25:08 crc kubenswrapper[4881]: I1211 09:25:08.189777 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hdm4r" podStartSLOduration=3.594949141 podStartE2EDuration="9.189750581s" podCreationTimestamp="2025-12-11 09:24:59 +0000 UTC" firstStartedPulling="2025-12-11 09:25:02.03068741 +0000 UTC m=+4150.408056107" lastFinishedPulling="2025-12-11 09:25:07.62548885 +0000 UTC m=+4156.002857547" observedRunningTime="2025-12-11 09:25:08.177222561 +0000 UTC m=+4156.554591258" watchObservedRunningTime="2025-12-11 09:25:08.189750581 +0000 UTC m=+4156.567119278" Dec 11 09:25:10 crc kubenswrapper[4881]: I1211 09:25:10.193969 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hdm4r" Dec 11 09:25:10 crc kubenswrapper[4881]: I1211 09:25:10.194561 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-hdm4r" Dec 11 09:25:10 crc kubenswrapper[4881]: I1211 09:25:10.356279 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/certified-operators-hdm4r" Dec 11 09:25:20 crc kubenswrapper[4881]: I1211 09:25:20.246916 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hdm4r" Dec 11 09:25:20 crc kubenswrapper[4881]: I1211 09:25:20.310843 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hdm4r"] Dec 11 09:25:20 crc kubenswrapper[4881]: I1211 09:25:20.311109 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hdm4r" podUID="b081ce59-5487-45ec-9302-6d63a3c44fae" containerName="registry-server" containerID="cri-o://2389e8b8adb88dc8c82819a2a013982123e8000ff8e24b596f16de5535dc4c9b" gracePeriod=2 Dec 11 09:25:20 crc kubenswrapper[4881]: I1211 09:25:20.912243 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hdm4r" Dec 11 09:25:20 crc kubenswrapper[4881]: I1211 09:25:20.996329 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b081ce59-5487-45ec-9302-6d63a3c44fae-utilities\") pod \"b081ce59-5487-45ec-9302-6d63a3c44fae\" (UID: \"b081ce59-5487-45ec-9302-6d63a3c44fae\") " Dec 11 09:25:20 crc kubenswrapper[4881]: I1211 09:25:20.996692 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lgjq9\" (UniqueName: \"kubernetes.io/projected/b081ce59-5487-45ec-9302-6d63a3c44fae-kube-api-access-lgjq9\") pod \"b081ce59-5487-45ec-9302-6d63a3c44fae\" (UID: \"b081ce59-5487-45ec-9302-6d63a3c44fae\") " Dec 11 09:25:20 crc kubenswrapper[4881]: I1211 09:25:20.996757 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b081ce59-5487-45ec-9302-6d63a3c44fae-catalog-content\") pod \"b081ce59-5487-45ec-9302-6d63a3c44fae\" (UID: \"b081ce59-5487-45ec-9302-6d63a3c44fae\") " Dec 11 09:25:20 crc kubenswrapper[4881]: I1211 09:25:20.998212 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b081ce59-5487-45ec-9302-6d63a3c44fae-utilities" (OuterVolumeSpecName: "utilities") pod "b081ce59-5487-45ec-9302-6d63a3c44fae" (UID: "b081ce59-5487-45ec-9302-6d63a3c44fae"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.005380 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b081ce59-5487-45ec-9302-6d63a3c44fae-kube-api-access-lgjq9" (OuterVolumeSpecName: "kube-api-access-lgjq9") pod "b081ce59-5487-45ec-9302-6d63a3c44fae" (UID: "b081ce59-5487-45ec-9302-6d63a3c44fae"). InnerVolumeSpecName "kube-api-access-lgjq9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.062721 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b081ce59-5487-45ec-9302-6d63a3c44fae-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b081ce59-5487-45ec-9302-6d63a3c44fae" (UID: "b081ce59-5487-45ec-9302-6d63a3c44fae"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.102458 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lgjq9\" (UniqueName: \"kubernetes.io/projected/b081ce59-5487-45ec-9302-6d63a3c44fae-kube-api-access-lgjq9\") on node \"crc\" DevicePath \"\"" Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.102504 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b081ce59-5487-45ec-9302-6d63a3c44fae-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.102547 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b081ce59-5487-45ec-9302-6d63a3c44fae-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.317794 4881 generic.go:334] "Generic (PLEG): container finished" podID="b081ce59-5487-45ec-9302-6d63a3c44fae" containerID="2389e8b8adb88dc8c82819a2a013982123e8000ff8e24b596f16de5535dc4c9b" exitCode=0 Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.318927 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hdm4r" event={"ID":"b081ce59-5487-45ec-9302-6d63a3c44fae","Type":"ContainerDied","Data":"2389e8b8adb88dc8c82819a2a013982123e8000ff8e24b596f16de5535dc4c9b"} Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.319072 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hdm4r" event={"ID":"b081ce59-5487-45ec-9302-6d63a3c44fae","Type":"ContainerDied","Data":"ebbc1ed51c4195ecf9b049881f24270026379c05a388947beddd2346053a5f79"} Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.319200 4881 scope.go:117] "RemoveContainer" containerID="2389e8b8adb88dc8c82819a2a013982123e8000ff8e24b596f16de5535dc4c9b" Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.319520 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hdm4r" Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.359536 4881 scope.go:117] "RemoveContainer" containerID="172693520e93d618e271e2d540a8357ce2ce4321b4112350bf89f479b2f7ebe8" Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.365088 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hdm4r"] Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.378654 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hdm4r"] Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.392494 4881 scope.go:117] "RemoveContainer" containerID="cae206f000038022dddda837073873409446efc09c1d8946d0e46367f434f531" Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.455082 4881 scope.go:117] "RemoveContainer" containerID="2389e8b8adb88dc8c82819a2a013982123e8000ff8e24b596f16de5535dc4c9b" Dec 11 09:25:21 crc kubenswrapper[4881]: E1211 09:25:21.455576 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2389e8b8adb88dc8c82819a2a013982123e8000ff8e24b596f16de5535dc4c9b\": container with ID starting with 2389e8b8adb88dc8c82819a2a013982123e8000ff8e24b596f16de5535dc4c9b not found: ID does not exist" containerID="2389e8b8adb88dc8c82819a2a013982123e8000ff8e24b596f16de5535dc4c9b" Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.455619 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2389e8b8adb88dc8c82819a2a013982123e8000ff8e24b596f16de5535dc4c9b"} err="failed to get container status \"2389e8b8adb88dc8c82819a2a013982123e8000ff8e24b596f16de5535dc4c9b\": rpc error: code = NotFound desc = could not find container \"2389e8b8adb88dc8c82819a2a013982123e8000ff8e24b596f16de5535dc4c9b\": container with ID starting with 2389e8b8adb88dc8c82819a2a013982123e8000ff8e24b596f16de5535dc4c9b not found: ID does not exist" Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.455642 4881 scope.go:117] "RemoveContainer" containerID="172693520e93d618e271e2d540a8357ce2ce4321b4112350bf89f479b2f7ebe8" Dec 11 09:25:21 crc kubenswrapper[4881]: E1211 09:25:21.455948 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"172693520e93d618e271e2d540a8357ce2ce4321b4112350bf89f479b2f7ebe8\": container with ID starting with 172693520e93d618e271e2d540a8357ce2ce4321b4112350bf89f479b2f7ebe8 not found: ID does not exist" containerID="172693520e93d618e271e2d540a8357ce2ce4321b4112350bf89f479b2f7ebe8" Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.456114 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"172693520e93d618e271e2d540a8357ce2ce4321b4112350bf89f479b2f7ebe8"} err="failed to get container status \"172693520e93d618e271e2d540a8357ce2ce4321b4112350bf89f479b2f7ebe8\": rpc error: code = NotFound desc = could not find container \"172693520e93d618e271e2d540a8357ce2ce4321b4112350bf89f479b2f7ebe8\": container with ID starting with 172693520e93d618e271e2d540a8357ce2ce4321b4112350bf89f479b2f7ebe8 not found: ID does not exist" Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.456229 4881 scope.go:117] "RemoveContainer" containerID="cae206f000038022dddda837073873409446efc09c1d8946d0e46367f434f531" Dec 11 09:25:21 crc kubenswrapper[4881]: E1211 09:25:21.456653 4881 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"cae206f000038022dddda837073873409446efc09c1d8946d0e46367f434f531\": container with ID starting with cae206f000038022dddda837073873409446efc09c1d8946d0e46367f434f531 not found: ID does not exist" containerID="cae206f000038022dddda837073873409446efc09c1d8946d0e46367f434f531" Dec 11 09:25:21 crc kubenswrapper[4881]: I1211 09:25:21.456682 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cae206f000038022dddda837073873409446efc09c1d8946d0e46367f434f531"} err="failed to get container status \"cae206f000038022dddda837073873409446efc09c1d8946d0e46367f434f531\": rpc error: code = NotFound desc = could not find container \"cae206f000038022dddda837073873409446efc09c1d8946d0e46367f434f531\": container with ID starting with cae206f000038022dddda837073873409446efc09c1d8946d0e46367f434f531 not found: ID does not exist" Dec 11 09:25:23 crc kubenswrapper[4881]: I1211 09:25:23.018077 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b081ce59-5487-45ec-9302-6d63a3c44fae" path="/var/lib/kubelet/pods/b081ce59-5487-45ec-9302-6d63a3c44fae/volumes" Dec 11 09:25:59 crc kubenswrapper[4881]: I1211 09:25:59.397232 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:25:59 crc kubenswrapper[4881]: I1211 09:25:59.397930 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:26:29 crc kubenswrapper[4881]: I1211 09:26:29.397200 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:26:29 crc kubenswrapper[4881]: I1211 09:26:29.397739 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:26:59 crc kubenswrapper[4881]: I1211 09:26:59.397981 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:26:59 crc kubenswrapper[4881]: I1211 09:26:59.398765 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:26:59 crc kubenswrapper[4881]: I1211 09:26:59.398819 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 09:26:59 crc kubenswrapper[4881]: I1211 09:26:59.399853 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3c919a09176628ef675a5ea64db5491ef66443c447c2effe71c706a3382e44e2"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 09:26:59 crc kubenswrapper[4881]: I1211 09:26:59.399913 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://3c919a09176628ef675a5ea64db5491ef66443c447c2effe71c706a3382e44e2" gracePeriod=600 Dec 11 09:27:00 crc kubenswrapper[4881]: I1211 09:27:00.533501 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="3c919a09176628ef675a5ea64db5491ef66443c447c2effe71c706a3382e44e2" exitCode=0 Dec 11 09:27:00 crc kubenswrapper[4881]: I1211 09:27:00.533539 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"3c919a09176628ef675a5ea64db5491ef66443c447c2effe71c706a3382e44e2"} Dec 11 09:27:00 crc kubenswrapper[4881]: I1211 09:27:00.533913 4881 scope.go:117] "RemoveContainer" containerID="7b429e560ca82d59affd89848e506735dc8a3305106d3b6a969e4360338b2dbc" Dec 11 09:27:01 crc kubenswrapper[4881]: I1211 09:27:01.551599 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0"} Dec 11 09:29:29 crc kubenswrapper[4881]: I1211 09:29:29.397027 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:29:29 crc kubenswrapper[4881]: I1211 09:29:29.397513 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:29:59 crc kubenswrapper[4881]: I1211 09:29:59.397153 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:29:59 crc kubenswrapper[4881]: I1211 09:29:59.397986 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:30:00 crc 
kubenswrapper[4881]: I1211 09:30:00.184770 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw"] Dec 11 09:30:00 crc kubenswrapper[4881]: E1211 09:30:00.185812 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b081ce59-5487-45ec-9302-6d63a3c44fae" containerName="extract-content" Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.185840 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b081ce59-5487-45ec-9302-6d63a3c44fae" containerName="extract-content" Dec 11 09:30:00 crc kubenswrapper[4881]: E1211 09:30:00.185867 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b081ce59-5487-45ec-9302-6d63a3c44fae" containerName="extract-utilities" Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.185877 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b081ce59-5487-45ec-9302-6d63a3c44fae" containerName="extract-utilities" Dec 11 09:30:00 crc kubenswrapper[4881]: E1211 09:30:00.185932 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b081ce59-5487-45ec-9302-6d63a3c44fae" containerName="registry-server" Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.185940 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b081ce59-5487-45ec-9302-6d63a3c44fae" containerName="registry-server" Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.186258 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="b081ce59-5487-45ec-9302-6d63a3c44fae" containerName="registry-server" Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.187455 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.191011 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.191217 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.213506 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw"] Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.250436 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f48ks\" (UniqueName: \"kubernetes.io/projected/2503315f-4203-483b-811e-d2a520564f97-kube-api-access-f48ks\") pod \"collect-profiles-29424090-df5hw\" (UID: \"2503315f-4203-483b-811e-d2a520564f97\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.250555 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2503315f-4203-483b-811e-d2a520564f97-secret-volume\") pod \"collect-profiles-29424090-df5hw\" (UID: \"2503315f-4203-483b-811e-d2a520564f97\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.250646 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2503315f-4203-483b-811e-d2a520564f97-config-volume\") pod 
\"collect-profiles-29424090-df5hw\" (UID: \"2503315f-4203-483b-811e-d2a520564f97\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.352219 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f48ks\" (UniqueName: \"kubernetes.io/projected/2503315f-4203-483b-811e-d2a520564f97-kube-api-access-f48ks\") pod \"collect-profiles-29424090-df5hw\" (UID: \"2503315f-4203-483b-811e-d2a520564f97\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.352594 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2503315f-4203-483b-811e-d2a520564f97-secret-volume\") pod \"collect-profiles-29424090-df5hw\" (UID: \"2503315f-4203-483b-811e-d2a520564f97\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.352762 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2503315f-4203-483b-811e-d2a520564f97-config-volume\") pod \"collect-profiles-29424090-df5hw\" (UID: \"2503315f-4203-483b-811e-d2a520564f97\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.354091 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2503315f-4203-483b-811e-d2a520564f97-config-volume\") pod \"collect-profiles-29424090-df5hw\" (UID: \"2503315f-4203-483b-811e-d2a520564f97\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.366211 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2503315f-4203-483b-811e-d2a520564f97-secret-volume\") pod \"collect-profiles-29424090-df5hw\" (UID: \"2503315f-4203-483b-811e-d2a520564f97\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.368808 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f48ks\" (UniqueName: \"kubernetes.io/projected/2503315f-4203-483b-811e-d2a520564f97-kube-api-access-f48ks\") pod \"collect-profiles-29424090-df5hw\" (UID: \"2503315f-4203-483b-811e-d2a520564f97\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" Dec 11 09:30:00 crc kubenswrapper[4881]: I1211 09:30:00.522540 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" Dec 11 09:30:01 crc kubenswrapper[4881]: I1211 09:30:01.227390 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw"] Dec 11 09:30:01 crc kubenswrapper[4881]: I1211 09:30:01.574498 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" event={"ID":"2503315f-4203-483b-811e-d2a520564f97","Type":"ContainerStarted","Data":"f2757a69b2a1cc2b2a50ce0f130770c5bcbc99ac1e083bcf64b3ecd958c73ef9"} Dec 11 09:30:01 crc kubenswrapper[4881]: I1211 09:30:01.574816 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" event={"ID":"2503315f-4203-483b-811e-d2a520564f97","Type":"ContainerStarted","Data":"d59bec5cbc1f07b70564575382e0739f26fbadc6abd237a60c0fd45ccabedb34"} Dec 11 09:30:01 crc kubenswrapper[4881]: I1211 09:30:01.592916 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" podStartSLOduration=1.592894933 podStartE2EDuration="1.592894933s" podCreationTimestamp="2025-12-11 09:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 09:30:01.5915528 +0000 UTC m=+4449.968921507" watchObservedRunningTime="2025-12-11 09:30:01.592894933 +0000 UTC m=+4449.970263630" Dec 11 09:30:02 crc kubenswrapper[4881]: I1211 09:30:02.589763 4881 generic.go:334] "Generic (PLEG): container finished" podID="2503315f-4203-483b-811e-d2a520564f97" containerID="f2757a69b2a1cc2b2a50ce0f130770c5bcbc99ac1e083bcf64b3ecd958c73ef9" exitCode=0 Dec 11 09:30:02 crc kubenswrapper[4881]: I1211 09:30:02.589843 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" event={"ID":"2503315f-4203-483b-811e-d2a520564f97","Type":"ContainerDied","Data":"f2757a69b2a1cc2b2a50ce0f130770c5bcbc99ac1e083bcf64b3ecd958c73ef9"} Dec 11 09:30:04 crc kubenswrapper[4881]: I1211 09:30:04.117517 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" Dec 11 09:30:04 crc kubenswrapper[4881]: I1211 09:30:04.187872 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f48ks\" (UniqueName: \"kubernetes.io/projected/2503315f-4203-483b-811e-d2a520564f97-kube-api-access-f48ks\") pod \"2503315f-4203-483b-811e-d2a520564f97\" (UID: \"2503315f-4203-483b-811e-d2a520564f97\") " Dec 11 09:30:04 crc kubenswrapper[4881]: I1211 09:30:04.188018 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2503315f-4203-483b-811e-d2a520564f97-secret-volume\") pod \"2503315f-4203-483b-811e-d2a520564f97\" (UID: \"2503315f-4203-483b-811e-d2a520564f97\") " Dec 11 09:30:04 crc kubenswrapper[4881]: I1211 09:30:04.188310 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2503315f-4203-483b-811e-d2a520564f97-config-volume\") pod \"2503315f-4203-483b-811e-d2a520564f97\" (UID: \"2503315f-4203-483b-811e-d2a520564f97\") " Dec 11 09:30:04 crc kubenswrapper[4881]: I1211 09:30:04.188964 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2503315f-4203-483b-811e-d2a520564f97-config-volume" (OuterVolumeSpecName: "config-volume") pod "2503315f-4203-483b-811e-d2a520564f97" (UID: "2503315f-4203-483b-811e-d2a520564f97"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 09:30:04 crc kubenswrapper[4881]: I1211 09:30:04.197698 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2503315f-4203-483b-811e-d2a520564f97-kube-api-access-f48ks" (OuterVolumeSpecName: "kube-api-access-f48ks") pod "2503315f-4203-483b-811e-d2a520564f97" (UID: "2503315f-4203-483b-811e-d2a520564f97"). InnerVolumeSpecName "kube-api-access-f48ks". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:30:04 crc kubenswrapper[4881]: I1211 09:30:04.200563 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2503315f-4203-483b-811e-d2a520564f97-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2503315f-4203-483b-811e-d2a520564f97" (UID: "2503315f-4203-483b-811e-d2a520564f97"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:30:04 crc kubenswrapper[4881]: I1211 09:30:04.291090 4881 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2503315f-4203-483b-811e-d2a520564f97-config-volume\") on node \"crc\" DevicePath \"\"" Dec 11 09:30:04 crc kubenswrapper[4881]: I1211 09:30:04.291121 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f48ks\" (UniqueName: \"kubernetes.io/projected/2503315f-4203-483b-811e-d2a520564f97-kube-api-access-f48ks\") on node \"crc\" DevicePath \"\"" Dec 11 09:30:04 crc kubenswrapper[4881]: I1211 09:30:04.291139 4881 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2503315f-4203-483b-811e-d2a520564f97-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 11 09:30:04 crc kubenswrapper[4881]: I1211 09:30:04.617171 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" event={"ID":"2503315f-4203-483b-811e-d2a520564f97","Type":"ContainerDied","Data":"d59bec5cbc1f07b70564575382e0739f26fbadc6abd237a60c0fd45ccabedb34"} Dec 11 09:30:04 crc kubenswrapper[4881]: I1211 09:30:04.617512 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d59bec5cbc1f07b70564575382e0739f26fbadc6abd237a60c0fd45ccabedb34" Dec 11 09:30:04 crc kubenswrapper[4881]: I1211 09:30:04.617438 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw" Dec 11 09:30:04 crc kubenswrapper[4881]: I1211 09:30:04.691256 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7"] Dec 11 09:30:04 crc kubenswrapper[4881]: I1211 09:30:04.707870 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424045-r9ck7"] Dec 11 09:30:05 crc kubenswrapper[4881]: I1211 09:30:05.021328 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79fedc84-0e64-4f6f-82a1-ce37cf2e9304" path="/var/lib/kubelet/pods/79fedc84-0e64-4f6f-82a1-ce37cf2e9304/volumes" Dec 11 09:30:12 crc kubenswrapper[4881]: I1211 09:30:12.102422 4881 scope.go:117] "RemoveContainer" containerID="afd3a472892b2a696e9f1749de8addbbc1347f2096dae11bfcee21112b65a8ea" Dec 11 09:30:29 crc kubenswrapper[4881]: I1211 09:30:29.412305 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:30:29 crc kubenswrapper[4881]: I1211 09:30:29.412886 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:30:29 crc kubenswrapper[4881]: I1211 09:30:29.412937 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 09:30:29 crc kubenswrapper[4881]: I1211 09:30:29.414056 4881 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 09:30:29 crc kubenswrapper[4881]: I1211 09:30:29.414127 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" gracePeriod=600 Dec 11 09:30:35 crc kubenswrapper[4881]: E1211 09:30:35.424935 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:30:35 crc kubenswrapper[4881]: I1211 09:30:35.770836 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" exitCode=0 Dec 11 09:30:35 crc kubenswrapper[4881]: I1211 09:30:35.770908 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0"} Dec 11 09:30:35 crc kubenswrapper[4881]: I1211 09:30:35.771216 4881 scope.go:117] "RemoveContainer" containerID="3c919a09176628ef675a5ea64db5491ef66443c447c2effe71c706a3382e44e2" Dec 11 09:30:36 crc kubenswrapper[4881]: I1211 09:30:36.785929 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:30:36 crc kubenswrapper[4881]: E1211 09:30:36.786410 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:30:51 crc kubenswrapper[4881]: I1211 09:30:51.006271 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:30:51 crc kubenswrapper[4881]: E1211 09:30:51.007875 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:31:06 crc kubenswrapper[4881]: I1211 09:31:06.005768 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:31:06 crc kubenswrapper[4881]: E1211 09:31:06.006497 4881 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:31:21 crc kubenswrapper[4881]: I1211 09:31:21.006230 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:31:21 crc kubenswrapper[4881]: E1211 09:31:21.007068 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:31:32 crc kubenswrapper[4881]: I1211 09:31:32.006581 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:31:32 crc kubenswrapper[4881]: E1211 09:31:32.007539 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:31:46 crc kubenswrapper[4881]: I1211 09:31:46.005960 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:31:46 crc kubenswrapper[4881]: E1211 09:31:46.006879 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:31:57 crc kubenswrapper[4881]: I1211 09:31:57.005722 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:31:57 crc kubenswrapper[4881]: E1211 09:31:57.006949 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:31:58 crc kubenswrapper[4881]: I1211 09:31:58.936060 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8p9kt"] Dec 11 09:31:58 crc kubenswrapper[4881]: E1211 09:31:58.938673 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2503315f-4203-483b-811e-d2a520564f97" containerName="collect-profiles" Dec 11 09:31:58 crc kubenswrapper[4881]: I1211 09:31:58.938691 4881 
state_mem.go:107] "Deleted CPUSet assignment" podUID="2503315f-4203-483b-811e-d2a520564f97" containerName="collect-profiles" Dec 11 09:31:58 crc kubenswrapper[4881]: I1211 09:31:58.939019 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="2503315f-4203-483b-811e-d2a520564f97" containerName="collect-profiles" Dec 11 09:31:58 crc kubenswrapper[4881]: I1211 09:31:58.941305 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8p9kt" Dec 11 09:31:58 crc kubenswrapper[4881]: I1211 09:31:58.956168 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8p9kt"] Dec 11 09:31:59 crc kubenswrapper[4881]: I1211 09:31:59.107100 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f5819c5-f4df-4726-a435-7cdaf5a952bf-catalog-content\") pod \"redhat-operators-8p9kt\" (UID: \"2f5819c5-f4df-4726-a435-7cdaf5a952bf\") " pod="openshift-marketplace/redhat-operators-8p9kt" Dec 11 09:31:59 crc kubenswrapper[4881]: I1211 09:31:59.107162 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmkwb\" (UniqueName: \"kubernetes.io/projected/2f5819c5-f4df-4726-a435-7cdaf5a952bf-kube-api-access-xmkwb\") pod \"redhat-operators-8p9kt\" (UID: \"2f5819c5-f4df-4726-a435-7cdaf5a952bf\") " pod="openshift-marketplace/redhat-operators-8p9kt" Dec 11 09:31:59 crc kubenswrapper[4881]: I1211 09:31:59.107285 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f5819c5-f4df-4726-a435-7cdaf5a952bf-utilities\") pod \"redhat-operators-8p9kt\" (UID: \"2f5819c5-f4df-4726-a435-7cdaf5a952bf\") " pod="openshift-marketplace/redhat-operators-8p9kt" Dec 11 09:31:59 crc kubenswrapper[4881]: I1211 09:31:59.209077 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f5819c5-f4df-4726-a435-7cdaf5a952bf-catalog-content\") pod \"redhat-operators-8p9kt\" (UID: \"2f5819c5-f4df-4726-a435-7cdaf5a952bf\") " pod="openshift-marketplace/redhat-operators-8p9kt" Dec 11 09:31:59 crc kubenswrapper[4881]: I1211 09:31:59.209154 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmkwb\" (UniqueName: \"kubernetes.io/projected/2f5819c5-f4df-4726-a435-7cdaf5a952bf-kube-api-access-xmkwb\") pod \"redhat-operators-8p9kt\" (UID: \"2f5819c5-f4df-4726-a435-7cdaf5a952bf\") " pod="openshift-marketplace/redhat-operators-8p9kt" Dec 11 09:31:59 crc kubenswrapper[4881]: I1211 09:31:59.209292 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f5819c5-f4df-4726-a435-7cdaf5a952bf-utilities\") pod \"redhat-operators-8p9kt\" (UID: \"2f5819c5-f4df-4726-a435-7cdaf5a952bf\") " pod="openshift-marketplace/redhat-operators-8p9kt" Dec 11 09:31:59 crc kubenswrapper[4881]: I1211 09:31:59.210475 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f5819c5-f4df-4726-a435-7cdaf5a952bf-utilities\") pod \"redhat-operators-8p9kt\" (UID: \"2f5819c5-f4df-4726-a435-7cdaf5a952bf\") " pod="openshift-marketplace/redhat-operators-8p9kt" Dec 11 09:31:59 crc kubenswrapper[4881]: I1211 09:31:59.210632 4881 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f5819c5-f4df-4726-a435-7cdaf5a952bf-catalog-content\") pod \"redhat-operators-8p9kt\" (UID: \"2f5819c5-f4df-4726-a435-7cdaf5a952bf\") " pod="openshift-marketplace/redhat-operators-8p9kt" Dec 11 09:31:59 crc kubenswrapper[4881]: I1211 09:31:59.233293 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmkwb\" (UniqueName: \"kubernetes.io/projected/2f5819c5-f4df-4726-a435-7cdaf5a952bf-kube-api-access-xmkwb\") pod \"redhat-operators-8p9kt\" (UID: \"2f5819c5-f4df-4726-a435-7cdaf5a952bf\") " pod="openshift-marketplace/redhat-operators-8p9kt" Dec 11 09:31:59 crc kubenswrapper[4881]: I1211 09:31:59.277703 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8p9kt" Dec 11 09:31:59 crc kubenswrapper[4881]: I1211 09:31:59.888715 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8p9kt"] Dec 11 09:32:00 crc kubenswrapper[4881]: I1211 09:32:00.755274 4881 generic.go:334] "Generic (PLEG): container finished" podID="2f5819c5-f4df-4726-a435-7cdaf5a952bf" containerID="29acfd6193e268a1e49830c759046006583a6c3411a616a5cad0b853e8517415" exitCode=0 Dec 11 09:32:00 crc kubenswrapper[4881]: I1211 09:32:00.755361 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8p9kt" event={"ID":"2f5819c5-f4df-4726-a435-7cdaf5a952bf","Type":"ContainerDied","Data":"29acfd6193e268a1e49830c759046006583a6c3411a616a5cad0b853e8517415"} Dec 11 09:32:00 crc kubenswrapper[4881]: I1211 09:32:00.755572 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8p9kt" event={"ID":"2f5819c5-f4df-4726-a435-7cdaf5a952bf","Type":"ContainerStarted","Data":"c80372745db8785bb992bcf865c9711b0deac0857d32e956ce118a4fa598675f"} Dec 11 09:32:00 crc kubenswrapper[4881]: I1211 09:32:00.757829 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 09:32:01 crc kubenswrapper[4881]: I1211 09:32:01.325567 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-t27hd"] Dec 11 09:32:01 crc kubenswrapper[4881]: I1211 09:32:01.328810 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t27hd" Dec 11 09:32:01 crc kubenswrapper[4881]: I1211 09:32:01.334987 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t27hd"] Dec 11 09:32:01 crc kubenswrapper[4881]: I1211 09:32:01.468687 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b13d04f5-66c3-4f21-aaca-9a9426d9e059-utilities\") pod \"community-operators-t27hd\" (UID: \"b13d04f5-66c3-4f21-aaca-9a9426d9e059\") " pod="openshift-marketplace/community-operators-t27hd" Dec 11 09:32:01 crc kubenswrapper[4881]: I1211 09:32:01.469233 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9h29b\" (UniqueName: \"kubernetes.io/projected/b13d04f5-66c3-4f21-aaca-9a9426d9e059-kube-api-access-9h29b\") pod \"community-operators-t27hd\" (UID: \"b13d04f5-66c3-4f21-aaca-9a9426d9e059\") " pod="openshift-marketplace/community-operators-t27hd" Dec 11 09:32:01 crc kubenswrapper[4881]: I1211 09:32:01.469416 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b13d04f5-66c3-4f21-aaca-9a9426d9e059-catalog-content\") pod \"community-operators-t27hd\" (UID: \"b13d04f5-66c3-4f21-aaca-9a9426d9e059\") " pod="openshift-marketplace/community-operators-t27hd" Dec 11 09:32:01 crc kubenswrapper[4881]: I1211 09:32:01.571374 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b13d04f5-66c3-4f21-aaca-9a9426d9e059-utilities\") pod \"community-operators-t27hd\" (UID: \"b13d04f5-66c3-4f21-aaca-9a9426d9e059\") " pod="openshift-marketplace/community-operators-t27hd" Dec 11 09:32:01 crc kubenswrapper[4881]: I1211 09:32:01.571451 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9h29b\" (UniqueName: \"kubernetes.io/projected/b13d04f5-66c3-4f21-aaca-9a9426d9e059-kube-api-access-9h29b\") pod \"community-operators-t27hd\" (UID: \"b13d04f5-66c3-4f21-aaca-9a9426d9e059\") " pod="openshift-marketplace/community-operators-t27hd" Dec 11 09:32:01 crc kubenswrapper[4881]: I1211 09:32:01.571503 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b13d04f5-66c3-4f21-aaca-9a9426d9e059-catalog-content\") pod \"community-operators-t27hd\" (UID: \"b13d04f5-66c3-4f21-aaca-9a9426d9e059\") " pod="openshift-marketplace/community-operators-t27hd" Dec 11 09:32:01 crc kubenswrapper[4881]: I1211 09:32:01.571990 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b13d04f5-66c3-4f21-aaca-9a9426d9e059-catalog-content\") pod \"community-operators-t27hd\" (UID: \"b13d04f5-66c3-4f21-aaca-9a9426d9e059\") " pod="openshift-marketplace/community-operators-t27hd" Dec 11 09:32:01 crc kubenswrapper[4881]: I1211 09:32:01.572348 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b13d04f5-66c3-4f21-aaca-9a9426d9e059-utilities\") pod \"community-operators-t27hd\" (UID: \"b13d04f5-66c3-4f21-aaca-9a9426d9e059\") " pod="openshift-marketplace/community-operators-t27hd" Dec 11 09:32:01 crc kubenswrapper[4881]: I1211 09:32:01.592485 4881 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9h29b\" (UniqueName: \"kubernetes.io/projected/b13d04f5-66c3-4f21-aaca-9a9426d9e059-kube-api-access-9h29b\") pod \"community-operators-t27hd\" (UID: \"b13d04f5-66c3-4f21-aaca-9a9426d9e059\") " pod="openshift-marketplace/community-operators-t27hd" Dec 11 09:32:01 crc kubenswrapper[4881]: I1211 09:32:01.655943 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t27hd" Dec 11 09:32:02 crc kubenswrapper[4881]: W1211 09:32:02.278713 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb13d04f5_66c3_4f21_aaca_9a9426d9e059.slice/crio-ed6261d9c66c55bb794ed72ffb0a4f1b029fe7b165ab5e3e2e9c4edc7db622c6 WatchSource:0}: Error finding container ed6261d9c66c55bb794ed72ffb0a4f1b029fe7b165ab5e3e2e9c4edc7db622c6: Status 404 returned error can't find the container with id ed6261d9c66c55bb794ed72ffb0a4f1b029fe7b165ab5e3e2e9c4edc7db622c6 Dec 11 09:32:02 crc kubenswrapper[4881]: I1211 09:32:02.283043 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t27hd"] Dec 11 09:32:02 crc kubenswrapper[4881]: I1211 09:32:02.780153 4881 generic.go:334] "Generic (PLEG): container finished" podID="b13d04f5-66c3-4f21-aaca-9a9426d9e059" containerID="996124a0c4de1319564ea988a194e49458c09fa01d6d3c0adc0aaa04e9476771" exitCode=0 Dec 11 09:32:02 crc kubenswrapper[4881]: I1211 09:32:02.780543 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t27hd" event={"ID":"b13d04f5-66c3-4f21-aaca-9a9426d9e059","Type":"ContainerDied","Data":"996124a0c4de1319564ea988a194e49458c09fa01d6d3c0adc0aaa04e9476771"} Dec 11 09:32:02 crc kubenswrapper[4881]: I1211 09:32:02.780574 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t27hd" event={"ID":"b13d04f5-66c3-4f21-aaca-9a9426d9e059","Type":"ContainerStarted","Data":"ed6261d9c66c55bb794ed72ffb0a4f1b029fe7b165ab5e3e2e9c4edc7db622c6"} Dec 11 09:32:02 crc kubenswrapper[4881]: I1211 09:32:02.789001 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8p9kt" event={"ID":"2f5819c5-f4df-4726-a435-7cdaf5a952bf","Type":"ContainerStarted","Data":"f8bc986e3cc174ec821d3975098532406a25dc44890201aa28f88de51af8cefb"} Dec 11 09:32:08 crc kubenswrapper[4881]: I1211 09:32:08.861807 4881 generic.go:334] "Generic (PLEG): container finished" podID="2f5819c5-f4df-4726-a435-7cdaf5a952bf" containerID="f8bc986e3cc174ec821d3975098532406a25dc44890201aa28f88de51af8cefb" exitCode=0 Dec 11 09:32:08 crc kubenswrapper[4881]: I1211 09:32:08.861887 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8p9kt" event={"ID":"2f5819c5-f4df-4726-a435-7cdaf5a952bf","Type":"ContainerDied","Data":"f8bc986e3cc174ec821d3975098532406a25dc44890201aa28f88de51af8cefb"} Dec 11 09:32:10 crc kubenswrapper[4881]: I1211 09:32:10.883182 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t27hd" event={"ID":"b13d04f5-66c3-4f21-aaca-9a9426d9e059","Type":"ContainerStarted","Data":"280eee0b4806d2712ae3b5cc7e9521731ff9865547cdccc8beb318a93ce39743"} Dec 11 09:32:10 crc kubenswrapper[4881]: I1211 09:32:10.886232 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8p9kt" 
event={"ID":"2f5819c5-f4df-4726-a435-7cdaf5a952bf","Type":"ContainerStarted","Data":"7722a7d8fad3b22610e17f494a01b0950fc0c1a77cea5335f348cf88410fcb34"}
Dec 11 09:32:10 crc kubenswrapper[4881]: I1211 09:32:10.924562 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8p9kt" podStartSLOduration=3.6921781129999998 podStartE2EDuration="12.92454164s" podCreationTimestamp="2025-12-11 09:31:58 +0000 UTC" firstStartedPulling="2025-12-11 09:32:00.757574609 +0000 UTC m=+4569.134943306" lastFinishedPulling="2025-12-11 09:32:09.989938116 +0000 UTC m=+4578.367306833" observedRunningTime="2025-12-11 09:32:10.9197461 +0000 UTC m=+4579.297114797" watchObservedRunningTime="2025-12-11 09:32:10.92454164 +0000 UTC m=+4579.301910337"
Dec 11 09:32:11 crc kubenswrapper[4881]: I1211 09:32:11.006143 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0"
Dec 11 09:32:11 crc kubenswrapper[4881]: E1211 09:32:11.006469 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:32:11 crc kubenswrapper[4881]: I1211 09:32:11.896517 4881 generic.go:334] "Generic (PLEG): container finished" podID="b13d04f5-66c3-4f21-aaca-9a9426d9e059" containerID="280eee0b4806d2712ae3b5cc7e9521731ff9865547cdccc8beb318a93ce39743" exitCode=0
Dec 11 09:32:11 crc kubenswrapper[4881]: I1211 09:32:11.896723 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t27hd" event={"ID":"b13d04f5-66c3-4f21-aaca-9a9426d9e059","Type":"ContainerDied","Data":"280eee0b4806d2712ae3b5cc7e9521731ff9865547cdccc8beb318a93ce39743"}
Dec 11 09:32:13 crc kubenswrapper[4881]: I1211 09:32:13.942419 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t27hd" event={"ID":"b13d04f5-66c3-4f21-aaca-9a9426d9e059","Type":"ContainerStarted","Data":"846b7c74eca239f5a18223c110a4e811e150efd0498e4a047ed345d9bc1601e6"}
Dec 11 09:32:13 crc kubenswrapper[4881]: I1211 09:32:13.971583 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-t27hd" podStartSLOduration=2.665468882 podStartE2EDuration="12.971541743s" podCreationTimestamp="2025-12-11 09:32:01 +0000 UTC" firstStartedPulling="2025-12-11 09:32:02.783912815 +0000 UTC m=+4571.161281512" lastFinishedPulling="2025-12-11 09:32:13.089985676 +0000 UTC m=+4581.467354373" observedRunningTime="2025-12-11 09:32:13.965885132 +0000 UTC m=+4582.343253849" watchObservedRunningTime="2025-12-11 09:32:13.971541743 +0000 UTC m=+4582.348910450"
Dec 11 09:32:19 crc kubenswrapper[4881]: I1211 09:32:19.278600 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8p9kt"
Dec 11 09:32:19 crc kubenswrapper[4881]: I1211 09:32:19.279113 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8p9kt"
Dec 11 09:32:20 crc kubenswrapper[4881]: I1211 09:32:20.344168 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8p9kt" podUID="2f5819c5-f4df-4726-a435-7cdaf5a952bf" containerName="registry-server" probeResult="failure" output=<
Dec 11 09:32:20 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s
Dec 11 09:32:20 crc kubenswrapper[4881]: >
Dec 11 09:32:21 crc kubenswrapper[4881]: I1211 09:32:21.656953 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-t27hd"
Dec 11 09:32:21 crc kubenswrapper[4881]: I1211 09:32:21.658463 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-t27hd"
Dec 11 09:32:21 crc kubenswrapper[4881]: I1211 09:32:21.718432 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-t27hd"
Dec 11 09:32:22 crc kubenswrapper[4881]: I1211 09:32:22.089671 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-t27hd"
Dec 11 09:32:22 crc kubenswrapper[4881]: I1211 09:32:22.146647 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t27hd"]
Dec 11 09:32:24 crc kubenswrapper[4881]: I1211 09:32:24.066935 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-t27hd" podUID="b13d04f5-66c3-4f21-aaca-9a9426d9e059" containerName="registry-server" containerID="cri-o://846b7c74eca239f5a18223c110a4e811e150efd0498e4a047ed345d9bc1601e6" gracePeriod=2
Dec 11 09:32:25 crc kubenswrapper[4881]: I1211 09:32:25.117453 4881 generic.go:334] "Generic (PLEG): container finished" podID="b13d04f5-66c3-4f21-aaca-9a9426d9e059" containerID="846b7c74eca239f5a18223c110a4e811e150efd0498e4a047ed345d9bc1601e6" exitCode=0
Dec 11 09:32:25 crc kubenswrapper[4881]: I1211 09:32:25.117685 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t27hd" event={"ID":"b13d04f5-66c3-4f21-aaca-9a9426d9e059","Type":"ContainerDied","Data":"846b7c74eca239f5a18223c110a4e811e150efd0498e4a047ed345d9bc1601e6"}
Dec 11 09:32:25 crc kubenswrapper[4881]: I1211 09:32:25.362723 4881 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/community-operators-t27hd" Dec 11 09:32:25 crc kubenswrapper[4881]: I1211 09:32:25.476477 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9h29b\" (UniqueName: \"kubernetes.io/projected/b13d04f5-66c3-4f21-aaca-9a9426d9e059-kube-api-access-9h29b\") pod \"b13d04f5-66c3-4f21-aaca-9a9426d9e059\" (UID: \"b13d04f5-66c3-4f21-aaca-9a9426d9e059\") " Dec 11 09:32:25 crc kubenswrapper[4881]: I1211 09:32:25.477041 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b13d04f5-66c3-4f21-aaca-9a9426d9e059-utilities\") pod \"b13d04f5-66c3-4f21-aaca-9a9426d9e059\" (UID: \"b13d04f5-66c3-4f21-aaca-9a9426d9e059\") " Dec 11 09:32:25 crc kubenswrapper[4881]: I1211 09:32:25.477197 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b13d04f5-66c3-4f21-aaca-9a9426d9e059-catalog-content\") pod \"b13d04f5-66c3-4f21-aaca-9a9426d9e059\" (UID: \"b13d04f5-66c3-4f21-aaca-9a9426d9e059\") " Dec 11 09:32:25 crc kubenswrapper[4881]: I1211 09:32:25.477948 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b13d04f5-66c3-4f21-aaca-9a9426d9e059-utilities" (OuterVolumeSpecName: "utilities") pod "b13d04f5-66c3-4f21-aaca-9a9426d9e059" (UID: "b13d04f5-66c3-4f21-aaca-9a9426d9e059"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:32:25 crc kubenswrapper[4881]: I1211 09:32:25.485558 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b13d04f5-66c3-4f21-aaca-9a9426d9e059-kube-api-access-9h29b" (OuterVolumeSpecName: "kube-api-access-9h29b") pod "b13d04f5-66c3-4f21-aaca-9a9426d9e059" (UID: "b13d04f5-66c3-4f21-aaca-9a9426d9e059"). InnerVolumeSpecName "kube-api-access-9h29b". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:32:25 crc kubenswrapper[4881]: I1211 09:32:25.535037 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b13d04f5-66c3-4f21-aaca-9a9426d9e059-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b13d04f5-66c3-4f21-aaca-9a9426d9e059" (UID: "b13d04f5-66c3-4f21-aaca-9a9426d9e059"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:32:25 crc kubenswrapper[4881]: I1211 09:32:25.580763 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9h29b\" (UniqueName: \"kubernetes.io/projected/b13d04f5-66c3-4f21-aaca-9a9426d9e059-kube-api-access-9h29b\") on node \"crc\" DevicePath \"\"" Dec 11 09:32:25 crc kubenswrapper[4881]: I1211 09:32:25.580811 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b13d04f5-66c3-4f21-aaca-9a9426d9e059-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:32:25 crc kubenswrapper[4881]: I1211 09:32:25.580826 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b13d04f5-66c3-4f21-aaca-9a9426d9e059-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:32:26 crc kubenswrapper[4881]: I1211 09:32:26.006475 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:32:26 crc kubenswrapper[4881]: E1211 09:32:26.009295 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:32:26 crc kubenswrapper[4881]: I1211 09:32:26.137559 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t27hd" event={"ID":"b13d04f5-66c3-4f21-aaca-9a9426d9e059","Type":"ContainerDied","Data":"ed6261d9c66c55bb794ed72ffb0a4f1b029fe7b165ab5e3e2e9c4edc7db622c6"} Dec 11 09:32:26 crc kubenswrapper[4881]: I1211 09:32:26.137644 4881 scope.go:117] "RemoveContainer" containerID="846b7c74eca239f5a18223c110a4e811e150efd0498e4a047ed345d9bc1601e6" Dec 11 09:32:26 crc kubenswrapper[4881]: I1211 09:32:26.138617 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t27hd" Dec 11 09:32:26 crc kubenswrapper[4881]: I1211 09:32:26.180499 4881 scope.go:117] "RemoveContainer" containerID="280eee0b4806d2712ae3b5cc7e9521731ff9865547cdccc8beb318a93ce39743" Dec 11 09:32:26 crc kubenswrapper[4881]: I1211 09:32:26.185296 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t27hd"] Dec 11 09:32:26 crc kubenswrapper[4881]: I1211 09:32:26.211576 4881 scope.go:117] "RemoveContainer" containerID="996124a0c4de1319564ea988a194e49458c09fa01d6d3c0adc0aaa04e9476771" Dec 11 09:32:26 crc kubenswrapper[4881]: I1211 09:32:26.226802 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-t27hd"] Dec 11 09:32:27 crc kubenswrapper[4881]: I1211 09:32:27.018721 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b13d04f5-66c3-4f21-aaca-9a9426d9e059" path="/var/lib/kubelet/pods/b13d04f5-66c3-4f21-aaca-9a9426d9e059/volumes" Dec 11 09:32:29 crc kubenswrapper[4881]: I1211 09:32:29.398016 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8p9kt" Dec 11 09:32:29 crc kubenswrapper[4881]: I1211 09:32:29.463842 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8p9kt" Dec 11 09:32:30 crc kubenswrapper[4881]: I1211 09:32:30.141286 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8p9kt"] Dec 11 09:32:31 crc kubenswrapper[4881]: I1211 09:32:31.188631 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8p9kt" podUID="2f5819c5-f4df-4726-a435-7cdaf5a952bf" containerName="registry-server" containerID="cri-o://7722a7d8fad3b22610e17f494a01b0950fc0c1a77cea5335f348cf88410fcb34" gracePeriod=2 Dec 11 09:32:31 crc kubenswrapper[4881]: I1211 09:32:31.795203 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8p9kt" Dec 11 09:32:31 crc kubenswrapper[4881]: I1211 09:32:31.849240 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f5819c5-f4df-4726-a435-7cdaf5a952bf-catalog-content\") pod \"2f5819c5-f4df-4726-a435-7cdaf5a952bf\" (UID: \"2f5819c5-f4df-4726-a435-7cdaf5a952bf\") " Dec 11 09:32:31 crc kubenswrapper[4881]: I1211 09:32:31.849602 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmkwb\" (UniqueName: \"kubernetes.io/projected/2f5819c5-f4df-4726-a435-7cdaf5a952bf-kube-api-access-xmkwb\") pod \"2f5819c5-f4df-4726-a435-7cdaf5a952bf\" (UID: \"2f5819c5-f4df-4726-a435-7cdaf5a952bf\") " Dec 11 09:32:31 crc kubenswrapper[4881]: I1211 09:32:31.849765 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f5819c5-f4df-4726-a435-7cdaf5a952bf-utilities\") pod \"2f5819c5-f4df-4726-a435-7cdaf5a952bf\" (UID: \"2f5819c5-f4df-4726-a435-7cdaf5a952bf\") " Dec 11 09:32:31 crc kubenswrapper[4881]: I1211 09:32:31.851240 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f5819c5-f4df-4726-a435-7cdaf5a952bf-utilities" (OuterVolumeSpecName: "utilities") pod "2f5819c5-f4df-4726-a435-7cdaf5a952bf" (UID: "2f5819c5-f4df-4726-a435-7cdaf5a952bf"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:32:31 crc kubenswrapper[4881]: I1211 09:32:31.866858 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f5819c5-f4df-4726-a435-7cdaf5a952bf-kube-api-access-xmkwb" (OuterVolumeSpecName: "kube-api-access-xmkwb") pod "2f5819c5-f4df-4726-a435-7cdaf5a952bf" (UID: "2f5819c5-f4df-4726-a435-7cdaf5a952bf"). InnerVolumeSpecName "kube-api-access-xmkwb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:32:31 crc kubenswrapper[4881]: I1211 09:32:31.959257 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmkwb\" (UniqueName: \"kubernetes.io/projected/2f5819c5-f4df-4726-a435-7cdaf5a952bf-kube-api-access-xmkwb\") on node \"crc\" DevicePath \"\"" Dec 11 09:32:31 crc kubenswrapper[4881]: I1211 09:32:31.959354 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f5819c5-f4df-4726-a435-7cdaf5a952bf-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:32:32 crc kubenswrapper[4881]: I1211 09:32:32.007948 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f5819c5-f4df-4726-a435-7cdaf5a952bf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2f5819c5-f4df-4726-a435-7cdaf5a952bf" (UID: "2f5819c5-f4df-4726-a435-7cdaf5a952bf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:32:32 crc kubenswrapper[4881]: I1211 09:32:32.061861 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f5819c5-f4df-4726-a435-7cdaf5a952bf-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:32:32 crc kubenswrapper[4881]: I1211 09:32:32.201454 4881 generic.go:334] "Generic (PLEG): container finished" podID="2f5819c5-f4df-4726-a435-7cdaf5a952bf" containerID="7722a7d8fad3b22610e17f494a01b0950fc0c1a77cea5335f348cf88410fcb34" exitCode=0 Dec 11 09:32:32 crc kubenswrapper[4881]: I1211 09:32:32.201771 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8p9kt" event={"ID":"2f5819c5-f4df-4726-a435-7cdaf5a952bf","Type":"ContainerDied","Data":"7722a7d8fad3b22610e17f494a01b0950fc0c1a77cea5335f348cf88410fcb34"} Dec 11 09:32:32 crc kubenswrapper[4881]: I1211 09:32:32.201805 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8p9kt" event={"ID":"2f5819c5-f4df-4726-a435-7cdaf5a952bf","Type":"ContainerDied","Data":"c80372745db8785bb992bcf865c9711b0deac0857d32e956ce118a4fa598675f"} Dec 11 09:32:32 crc kubenswrapper[4881]: I1211 09:32:32.201827 4881 scope.go:117] "RemoveContainer" containerID="7722a7d8fad3b22610e17f494a01b0950fc0c1a77cea5335f348cf88410fcb34" Dec 11 09:32:32 crc kubenswrapper[4881]: I1211 09:32:32.202029 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8p9kt" Dec 11 09:32:32 crc kubenswrapper[4881]: I1211 09:32:32.241209 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8p9kt"] Dec 11 09:32:32 crc kubenswrapper[4881]: I1211 09:32:32.245440 4881 scope.go:117] "RemoveContainer" containerID="f8bc986e3cc174ec821d3975098532406a25dc44890201aa28f88de51af8cefb" Dec 11 09:32:32 crc kubenswrapper[4881]: I1211 09:32:32.253159 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8p9kt"] Dec 11 09:32:32 crc kubenswrapper[4881]: I1211 09:32:32.269634 4881 scope.go:117] "RemoveContainer" containerID="29acfd6193e268a1e49830c759046006583a6c3411a616a5cad0b853e8517415" Dec 11 09:32:32 crc kubenswrapper[4881]: I1211 09:32:32.338373 4881 scope.go:117] "RemoveContainer" containerID="7722a7d8fad3b22610e17f494a01b0950fc0c1a77cea5335f348cf88410fcb34" Dec 11 09:32:32 crc kubenswrapper[4881]: E1211 09:32:32.338951 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7722a7d8fad3b22610e17f494a01b0950fc0c1a77cea5335f348cf88410fcb34\": container with ID starting with 7722a7d8fad3b22610e17f494a01b0950fc0c1a77cea5335f348cf88410fcb34 not found: ID does not exist" containerID="7722a7d8fad3b22610e17f494a01b0950fc0c1a77cea5335f348cf88410fcb34" Dec 11 09:32:32 crc kubenswrapper[4881]: I1211 09:32:32.338999 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7722a7d8fad3b22610e17f494a01b0950fc0c1a77cea5335f348cf88410fcb34"} err="failed to get container status \"7722a7d8fad3b22610e17f494a01b0950fc0c1a77cea5335f348cf88410fcb34\": rpc error: code = NotFound desc = could not find container \"7722a7d8fad3b22610e17f494a01b0950fc0c1a77cea5335f348cf88410fcb34\": container with ID starting with 7722a7d8fad3b22610e17f494a01b0950fc0c1a77cea5335f348cf88410fcb34 not found: ID does not exist" Dec 11 09:32:32 crc kubenswrapper[4881]: I1211 09:32:32.339026 4881 scope.go:117] "RemoveContainer" containerID="f8bc986e3cc174ec821d3975098532406a25dc44890201aa28f88de51af8cefb" Dec 11 09:32:32 crc kubenswrapper[4881]: E1211 09:32:32.339305 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8bc986e3cc174ec821d3975098532406a25dc44890201aa28f88de51af8cefb\": container with ID starting with f8bc986e3cc174ec821d3975098532406a25dc44890201aa28f88de51af8cefb not found: ID does not exist" containerID="f8bc986e3cc174ec821d3975098532406a25dc44890201aa28f88de51af8cefb" Dec 11 09:32:32 crc kubenswrapper[4881]: I1211 09:32:32.339330 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8bc986e3cc174ec821d3975098532406a25dc44890201aa28f88de51af8cefb"} err="failed to get container status \"f8bc986e3cc174ec821d3975098532406a25dc44890201aa28f88de51af8cefb\": rpc error: code = NotFound desc = could not find container \"f8bc986e3cc174ec821d3975098532406a25dc44890201aa28f88de51af8cefb\": container with ID starting with f8bc986e3cc174ec821d3975098532406a25dc44890201aa28f88de51af8cefb not found: ID does not exist" Dec 11 09:32:32 crc kubenswrapper[4881]: I1211 09:32:32.339364 4881 scope.go:117] "RemoveContainer" containerID="29acfd6193e268a1e49830c759046006583a6c3411a616a5cad0b853e8517415" Dec 11 09:32:32 crc kubenswrapper[4881]: E1211 09:32:32.339729 4881 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"29acfd6193e268a1e49830c759046006583a6c3411a616a5cad0b853e8517415\": container with ID starting with 29acfd6193e268a1e49830c759046006583a6c3411a616a5cad0b853e8517415 not found: ID does not exist" containerID="29acfd6193e268a1e49830c759046006583a6c3411a616a5cad0b853e8517415" Dec 11 09:32:32 crc kubenswrapper[4881]: I1211 09:32:32.339772 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29acfd6193e268a1e49830c759046006583a6c3411a616a5cad0b853e8517415"} err="failed to get container status \"29acfd6193e268a1e49830c759046006583a6c3411a616a5cad0b853e8517415\": rpc error: code = NotFound desc = could not find container \"29acfd6193e268a1e49830c759046006583a6c3411a616a5cad0b853e8517415\": container with ID starting with 29acfd6193e268a1e49830c759046006583a6c3411a616a5cad0b853e8517415 not found: ID does not exist" Dec 11 09:32:33 crc kubenswrapper[4881]: I1211 09:32:33.020113 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f5819c5-f4df-4726-a435-7cdaf5a952bf" path="/var/lib/kubelet/pods/2f5819c5-f4df-4726-a435-7cdaf5a952bf/volumes" Dec 11 09:32:40 crc kubenswrapper[4881]: I1211 09:32:40.005091 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:32:40 crc kubenswrapper[4881]: E1211 09:32:40.005847 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:32:53 crc kubenswrapper[4881]: I1211 09:32:53.018290 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:32:53 crc kubenswrapper[4881]: E1211 09:32:53.019070 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:33:04 crc kubenswrapper[4881]: I1211 09:33:04.005448 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:33:04 crc kubenswrapper[4881]: E1211 09:33:04.006145 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:33:17 crc kubenswrapper[4881]: I1211 09:33:17.006304 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:33:17 crc kubenswrapper[4881]: E1211 09:33:17.008008 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:33:32 crc kubenswrapper[4881]: I1211 09:33:32.006066 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:33:32 crc kubenswrapper[4881]: E1211 09:33:32.007074 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:33:47 crc kubenswrapper[4881]: I1211 09:33:47.005370 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:33:47 crc kubenswrapper[4881]: E1211 09:33:47.006298 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:34:00 crc kubenswrapper[4881]: I1211 09:34:00.005616 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:34:00 crc kubenswrapper[4881]: E1211 09:34:00.006401 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:34:13 crc kubenswrapper[4881]: I1211 09:34:13.018286 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:34:13 crc kubenswrapper[4881]: E1211 09:34:13.019143 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:34:24 crc kubenswrapper[4881]: I1211 09:34:24.006758 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:34:24 crc kubenswrapper[4881]: E1211 09:34:24.007606 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:34:36 crc kubenswrapper[4881]: I1211 09:34:36.005906 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0" Dec 11 09:34:36 crc kubenswrapper[4881]: E1211 09:34:36.006966 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.692065 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-v9drk"] Dec 11 09:34:39 crc kubenswrapper[4881]: E1211 09:34:39.693180 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f5819c5-f4df-4726-a435-7cdaf5a952bf" containerName="extract-utilities" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.693193 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f5819c5-f4df-4726-a435-7cdaf5a952bf" containerName="extract-utilities" Dec 11 09:34:39 crc kubenswrapper[4881]: E1211 09:34:39.693203 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b13d04f5-66c3-4f21-aaca-9a9426d9e059" containerName="registry-server" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.693209 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b13d04f5-66c3-4f21-aaca-9a9426d9e059" containerName="registry-server" Dec 11 09:34:39 crc kubenswrapper[4881]: E1211 09:34:39.693224 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f5819c5-f4df-4726-a435-7cdaf5a952bf" containerName="extract-content" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.693231 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f5819c5-f4df-4726-a435-7cdaf5a952bf" containerName="extract-content" Dec 11 09:34:39 crc kubenswrapper[4881]: E1211 09:34:39.693243 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b13d04f5-66c3-4f21-aaca-9a9426d9e059" containerName="extract-content" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.693249 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b13d04f5-66c3-4f21-aaca-9a9426d9e059" containerName="extract-content" Dec 11 09:34:39 crc kubenswrapper[4881]: E1211 09:34:39.693268 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f5819c5-f4df-4726-a435-7cdaf5a952bf" containerName="registry-server" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.693274 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f5819c5-f4df-4726-a435-7cdaf5a952bf" containerName="registry-server" Dec 11 09:34:39 crc kubenswrapper[4881]: E1211 09:34:39.693298 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b13d04f5-66c3-4f21-aaca-9a9426d9e059" containerName="extract-utilities" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.693306 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b13d04f5-66c3-4f21-aaca-9a9426d9e059" containerName="extract-utilities" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.693569 4881 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="2f5819c5-f4df-4726-a435-7cdaf5a952bf" containerName="registry-server" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.693596 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="b13d04f5-66c3-4f21-aaca-9a9426d9e059" containerName="registry-server" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.695286 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v9drk" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.716138 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-v9drk"] Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.792524 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-utilities\") pod \"redhat-marketplace-v9drk\" (UID: \"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb\") " pod="openshift-marketplace/redhat-marketplace-v9drk" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.792900 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-catalog-content\") pod \"redhat-marketplace-v9drk\" (UID: \"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb\") " pod="openshift-marketplace/redhat-marketplace-v9drk" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.793324 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xlw4\" (UniqueName: \"kubernetes.io/projected/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-kube-api-access-4xlw4\") pod \"redhat-marketplace-v9drk\" (UID: \"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb\") " pod="openshift-marketplace/redhat-marketplace-v9drk" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.895321 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xlw4\" (UniqueName: \"kubernetes.io/projected/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-kube-api-access-4xlw4\") pod \"redhat-marketplace-v9drk\" (UID: \"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb\") " pod="openshift-marketplace/redhat-marketplace-v9drk" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.895663 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-utilities\") pod \"redhat-marketplace-v9drk\" (UID: \"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb\") " pod="openshift-marketplace/redhat-marketplace-v9drk" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.895808 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-catalog-content\") pod \"redhat-marketplace-v9drk\" (UID: \"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb\") " pod="openshift-marketplace/redhat-marketplace-v9drk" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.896364 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-catalog-content\") pod \"redhat-marketplace-v9drk\" (UID: \"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb\") " pod="openshift-marketplace/redhat-marketplace-v9drk" Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 
09:34:39.896583 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-utilities\") pod \"redhat-marketplace-v9drk\" (UID: \"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb\") " pod="openshift-marketplace/redhat-marketplace-v9drk"
Dec 11 09:34:39 crc kubenswrapper[4881]: I1211 09:34:39.930556 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xlw4\" (UniqueName: \"kubernetes.io/projected/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-kube-api-access-4xlw4\") pod \"redhat-marketplace-v9drk\" (UID: \"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb\") " pod="openshift-marketplace/redhat-marketplace-v9drk"
Dec 11 09:34:40 crc kubenswrapper[4881]: I1211 09:34:40.029227 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v9drk"
Dec 11 09:34:40 crc kubenswrapper[4881]: I1211 09:34:40.860165 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-v9drk"]
Dec 11 09:34:41 crc kubenswrapper[4881]: I1211 09:34:41.758041 4881 generic.go:334] "Generic (PLEG): container finished" podID="76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb" containerID="b90a284767de77dea0b656a52e17796f6155748d82acc03f1d76a23ee64d8871" exitCode=0
Dec 11 09:34:41 crc kubenswrapper[4881]: I1211 09:34:41.758124 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v9drk" event={"ID":"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb","Type":"ContainerDied","Data":"b90a284767de77dea0b656a52e17796f6155748d82acc03f1d76a23ee64d8871"}
Dec 11 09:34:41 crc kubenswrapper[4881]: I1211 09:34:41.758372 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v9drk" event={"ID":"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb","Type":"ContainerStarted","Data":"6f63798dd6828c35876fa46ea1009d82b81a1d6a203362480be051f664aa7099"}
Dec 11 09:34:49 crc kubenswrapper[4881]: I1211 09:34:49.054976 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0"
Dec 11 09:34:49 crc kubenswrapper[4881]: E1211 09:34:49.055776 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:34:49 crc kubenswrapper[4881]: I1211 09:34:49.845910 4881 generic.go:334] "Generic (PLEG): container finished" podID="76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb" containerID="2410de14257289fff35c20ec1709d2d2a1e118006cc917729b88e03eeefe35a8" exitCode=0
Dec 11 09:34:49 crc kubenswrapper[4881]: I1211 09:34:49.846010 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v9drk" event={"ID":"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb","Type":"ContainerDied","Data":"2410de14257289fff35c20ec1709d2d2a1e118006cc917729b88e03eeefe35a8"}
Dec 11 09:34:51 crc kubenswrapper[4881]: I1211 09:34:51.872190 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v9drk" event={"ID":"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb","Type":"ContainerStarted","Data":"0a5bcb747938aa9b4c32e397becbb88eef6893cccf99a949c27c99833acf5161"}
Dec 11 09:34:51 crc kubenswrapper[4881]: I1211 09:34:51.896262 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-v9drk" podStartSLOduration=3.9495155730000002 podStartE2EDuration="12.896237702s" podCreationTimestamp="2025-12-11 09:34:39 +0000 UTC" firstStartedPulling="2025-12-11 09:34:41.760171929 +0000 UTC m=+4730.137540626" lastFinishedPulling="2025-12-11 09:34:50.706894048 +0000 UTC m=+4739.084262755" observedRunningTime="2025-12-11 09:34:51.891110205 +0000 UTC m=+4740.268478922" watchObservedRunningTime="2025-12-11 09:34:51.896237702 +0000 UTC m=+4740.273606399"
Dec 11 09:35:00 crc kubenswrapper[4881]: I1211 09:35:00.005535 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0"
Dec 11 09:35:00 crc kubenswrapper[4881]: E1211 09:35:00.007253 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:35:00 crc kubenswrapper[4881]: I1211 09:35:00.029942 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-v9drk"
Dec 11 09:35:00 crc kubenswrapper[4881]: I1211 09:35:00.030017 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-v9drk"
Dec 11 09:35:00 crc kubenswrapper[4881]: I1211 09:35:00.086924 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-v9drk"
Dec 11 09:35:01 crc kubenswrapper[4881]: I1211 09:35:01.105062 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-v9drk"
Dec 11 09:35:01 crc kubenswrapper[4881]: I1211 09:35:01.167342 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-v9drk"]
Dec 11 09:35:03 crc kubenswrapper[4881]: I1211 09:35:03.029785 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-v9drk" podUID="76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb" containerName="registry-server" containerID="cri-o://0a5bcb747938aa9b4c32e397becbb88eef6893cccf99a949c27c99833acf5161" gracePeriod=2
Dec 11 09:35:04 crc kubenswrapper[4881]: I1211 09:35:04.044008 4881 generic.go:334] "Generic (PLEG): container finished" podID="76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb" containerID="0a5bcb747938aa9b4c32e397becbb88eef6893cccf99a949c27c99833acf5161" exitCode=0
Dec 11 09:35:04 crc kubenswrapper[4881]: I1211 09:35:04.044336 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v9drk" event={"ID":"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb","Type":"ContainerDied","Data":"0a5bcb747938aa9b4c32e397becbb88eef6893cccf99a949c27c99833acf5161"}
Dec 11 09:35:04 crc kubenswrapper[4881]: I1211 09:35:04.320871 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v9drk"
Dec 11 09:35:04 crc kubenswrapper[4881]: I1211 09:35:04.384555 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-utilities\") pod \"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb\" (UID: \"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb\") "
Dec 11 09:35:04 crc kubenswrapper[4881]: I1211 09:35:04.384633 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-catalog-content\") pod \"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb\" (UID: \"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb\") "
Dec 11 09:35:04 crc kubenswrapper[4881]: I1211 09:35:04.384790 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4xlw4\" (UniqueName: \"kubernetes.io/projected/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-kube-api-access-4xlw4\") pod \"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb\" (UID: \"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb\") "
Dec 11 09:35:04 crc kubenswrapper[4881]: I1211 09:35:04.386242 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-utilities" (OuterVolumeSpecName: "utilities") pod "76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb" (UID: "76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 09:35:04 crc kubenswrapper[4881]: I1211 09:35:04.391468 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-kube-api-access-4xlw4" (OuterVolumeSpecName: "kube-api-access-4xlw4") pod "76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb" (UID: "76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb"). InnerVolumeSpecName "kube-api-access-4xlw4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 09:35:04 crc kubenswrapper[4881]: I1211 09:35:04.415156 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb" (UID: "76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 09:35:04 crc kubenswrapper[4881]: I1211 09:35:04.488624 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 09:35:04 crc kubenswrapper[4881]: I1211 09:35:04.488674 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 09:35:04 crc kubenswrapper[4881]: I1211 09:35:04.488689 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4xlw4\" (UniqueName: \"kubernetes.io/projected/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb-kube-api-access-4xlw4\") on node \"crc\" DevicePath \"\""
Dec 11 09:35:05 crc kubenswrapper[4881]: I1211 09:35:05.055684 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v9drk" event={"ID":"76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb","Type":"ContainerDied","Data":"6f63798dd6828c35876fa46ea1009d82b81a1d6a203362480be051f664aa7099"}
Dec 11 09:35:05 crc kubenswrapper[4881]: I1211 09:35:05.055752 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v9drk"
Dec 11 09:35:05 crc kubenswrapper[4881]: I1211 09:35:05.056021 4881 scope.go:117] "RemoveContainer" containerID="0a5bcb747938aa9b4c32e397becbb88eef6893cccf99a949c27c99833acf5161"
Dec 11 09:35:05 crc kubenswrapper[4881]: I1211 09:35:05.084843 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-v9drk"]
Dec 11 09:35:05 crc kubenswrapper[4881]: I1211 09:35:05.088925 4881 scope.go:117] "RemoveContainer" containerID="2410de14257289fff35c20ec1709d2d2a1e118006cc917729b88e03eeefe35a8"
Dec 11 09:35:05 crc kubenswrapper[4881]: I1211 09:35:05.097377 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-v9drk"]
Dec 11 09:35:05 crc kubenswrapper[4881]: I1211 09:35:05.141747 4881 scope.go:117] "RemoveContainer" containerID="b90a284767de77dea0b656a52e17796f6155748d82acc03f1d76a23ee64d8871"
Dec 11 09:35:07 crc kubenswrapper[4881]: I1211 09:35:07.021135 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb" path="/var/lib/kubelet/pods/76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb/volumes"
Dec 11 09:35:12 crc kubenswrapper[4881]: I1211 09:35:12.005512 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0"
Dec 11 09:35:12 crc kubenswrapper[4881]: E1211 09:35:12.006377 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:35:26 crc kubenswrapper[4881]: I1211 09:35:26.006962 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0"
Dec 11 09:35:26 crc kubenswrapper[4881]: E1211 09:35:26.008066 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:35:35 crc kubenswrapper[4881]: I1211 09:35:35.141729 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-754v9"]
Dec 11 09:35:35 crc kubenswrapper[4881]: E1211 09:35:35.142891 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb" containerName="registry-server"
Dec 11 09:35:35 crc kubenswrapper[4881]: I1211 09:35:35.142910 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb" containerName="registry-server"
Dec 11 09:35:35 crc kubenswrapper[4881]: E1211 09:35:35.142971 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb" containerName="extract-content"
Dec 11 09:35:35 crc kubenswrapper[4881]: I1211 09:35:35.142980 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb" containerName="extract-content"
Dec 11 09:35:35 crc kubenswrapper[4881]: E1211 09:35:35.143020 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb" containerName="extract-utilities"
Dec 11 09:35:35 crc kubenswrapper[4881]: I1211 09:35:35.143028 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb" containerName="extract-utilities"
Dec 11 09:35:35 crc kubenswrapper[4881]: I1211 09:35:35.143391 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="76c08cf9-c8c1-4863-ab0b-cf5c3519f6eb" containerName="registry-server"
Dec 11 09:35:35 crc kubenswrapper[4881]: I1211 09:35:35.145690 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-754v9"
Dec 11 09:35:35 crc kubenswrapper[4881]: I1211 09:35:35.161318 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-754v9"]
Dec 11 09:35:35 crc kubenswrapper[4881]: I1211 09:35:35.297214 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61648b74-25cf-409a-939b-4d69483bd1a3-catalog-content\") pod \"certified-operators-754v9\" (UID: \"61648b74-25cf-409a-939b-4d69483bd1a3\") " pod="openshift-marketplace/certified-operators-754v9"
Dec 11 09:35:35 crc kubenswrapper[4881]: I1211 09:35:35.297379 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61648b74-25cf-409a-939b-4d69483bd1a3-utilities\") pod \"certified-operators-754v9\" (UID: \"61648b74-25cf-409a-939b-4d69483bd1a3\") " pod="openshift-marketplace/certified-operators-754v9"
Dec 11 09:35:35 crc kubenswrapper[4881]: I1211 09:35:35.297481 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmsn8\" (UniqueName: \"kubernetes.io/projected/61648b74-25cf-409a-939b-4d69483bd1a3-kube-api-access-cmsn8\") pod \"certified-operators-754v9\" (UID: \"61648b74-25cf-409a-939b-4d69483bd1a3\") " pod="openshift-marketplace/certified-operators-754v9"
Dec 11 09:35:35 crc kubenswrapper[4881]: I1211 09:35:35.399559 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmsn8\" (UniqueName: \"kubernetes.io/projected/61648b74-25cf-409a-939b-4d69483bd1a3-kube-api-access-cmsn8\") pod \"certified-operators-754v9\" (UID: \"61648b74-25cf-409a-939b-4d69483bd1a3\") " pod="openshift-marketplace/certified-operators-754v9"
Dec 11 09:35:35 crc kubenswrapper[4881]: I1211 09:35:35.400034 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61648b74-25cf-409a-939b-4d69483bd1a3-catalog-content\") pod \"certified-operators-754v9\" (UID: \"61648b74-25cf-409a-939b-4d69483bd1a3\") " pod="openshift-marketplace/certified-operators-754v9"
Dec 11 09:35:35 crc kubenswrapper[4881]: I1211 09:35:35.400241 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61648b74-25cf-409a-939b-4d69483bd1a3-utilities\") pod \"certified-operators-754v9\" (UID: \"61648b74-25cf-409a-939b-4d69483bd1a3\") " pod="openshift-marketplace/certified-operators-754v9"
Dec 11 09:35:35 crc kubenswrapper[4881]: I1211 09:35:35.400712 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61648b74-25cf-409a-939b-4d69483bd1a3-catalog-content\") pod \"certified-operators-754v9\" (UID: \"61648b74-25cf-409a-939b-4d69483bd1a3\") " pod="openshift-marketplace/certified-operators-754v9"
Dec 11 09:35:35 crc kubenswrapper[4881]: I1211 09:35:35.401075 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61648b74-25cf-409a-939b-4d69483bd1a3-utilities\") pod \"certified-operators-754v9\" (UID: \"61648b74-25cf-409a-939b-4d69483bd1a3\") " pod="openshift-marketplace/certified-operators-754v9"
Dec 11 09:35:35 crc kubenswrapper[4881]: I1211 09:35:35.422423 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmsn8\" (UniqueName: \"kubernetes.io/projected/61648b74-25cf-409a-939b-4d69483bd1a3-kube-api-access-cmsn8\") pod \"certified-operators-754v9\" (UID: \"61648b74-25cf-409a-939b-4d69483bd1a3\") " pod="openshift-marketplace/certified-operators-754v9"
Dec 11 09:35:35 crc kubenswrapper[4881]: I1211 09:35:35.468211 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-754v9"
Dec 11 09:35:36 crc kubenswrapper[4881]: I1211 09:35:36.058176 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-754v9"]
Dec 11 09:35:36 crc kubenswrapper[4881]: I1211 09:35:36.419399 4881 generic.go:334] "Generic (PLEG): container finished" podID="61648b74-25cf-409a-939b-4d69483bd1a3" containerID="2d4474ccb667abdbc7e57d0980bf0e9c5003b79e16293f724875b427f1ec0f57" exitCode=0
Dec 11 09:35:36 crc kubenswrapper[4881]: I1211 09:35:36.419444 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-754v9" event={"ID":"61648b74-25cf-409a-939b-4d69483bd1a3","Type":"ContainerDied","Data":"2d4474ccb667abdbc7e57d0980bf0e9c5003b79e16293f724875b427f1ec0f57"}
Dec 11 09:35:36 crc kubenswrapper[4881]: I1211 09:35:36.419758 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-754v9" event={"ID":"61648b74-25cf-409a-939b-4d69483bd1a3","Type":"ContainerStarted","Data":"ca0486f61110208221ca25dd3d50fa66df39e8ee53b69cab43390719547be156"}
Dec 11 09:35:37 crc kubenswrapper[4881]: I1211 09:35:37.432571 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-754v9" event={"ID":"61648b74-25cf-409a-939b-4d69483bd1a3","Type":"ContainerStarted","Data":"7c97c298edc3a4b5703c53d08a6dbd508490a63e86b21811b52802fe09061819"}
Dec 11 09:35:39 crc kubenswrapper[4881]: I1211 09:35:39.455422 4881 generic.go:334] "Generic (PLEG): container finished" podID="61648b74-25cf-409a-939b-4d69483bd1a3" containerID="7c97c298edc3a4b5703c53d08a6dbd508490a63e86b21811b52802fe09061819" exitCode=0
Dec 11 09:35:39 crc kubenswrapper[4881]: I1211 09:35:39.455490 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-754v9" event={"ID":"61648b74-25cf-409a-939b-4d69483bd1a3","Type":"ContainerDied","Data":"7c97c298edc3a4b5703c53d08a6dbd508490a63e86b21811b52802fe09061819"}
Dec 11 09:35:40 crc kubenswrapper[4881]: I1211 09:35:40.006602 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0"
Dec 11 09:35:42 crc kubenswrapper[4881]: I1211 09:35:42.488870 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"337643c0cd1de28485c17857df37f2bfd89fb1fbf082effe9ba1f659e31f1340"}
Dec 11 09:35:42 crc kubenswrapper[4881]: I1211 09:35:42.494564 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-754v9" event={"ID":"61648b74-25cf-409a-939b-4d69483bd1a3","Type":"ContainerStarted","Data":"5b6ded7e862224fcff416ecf130799de17cf0ccb0fd120116b07bda743e7ef5a"}
Dec 11 09:35:42 crc kubenswrapper[4881]: I1211 09:35:42.555218 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-754v9" podStartSLOduration=4.053608556 podStartE2EDuration="7.55519462s" podCreationTimestamp="2025-12-11 09:35:35 +0000 UTC" firstStartedPulling="2025-12-11 09:35:36.422025302 +0000 UTC m=+4784.799393999" lastFinishedPulling="2025-12-11 09:35:39.923611356 +0000 UTC m=+4788.300980063" observedRunningTime="2025-12-11 09:35:42.538633969 +0000 UTC m=+4790.916002666" watchObservedRunningTime="2025-12-11 09:35:42.55519462 +0000 UTC m=+4790.932563317"
Dec 11 09:35:45 crc kubenswrapper[4881]: I1211 09:35:45.474763 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-754v9"
Dec 11 09:35:45 crc kubenswrapper[4881]: I1211 09:35:45.475393 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-754v9"
Dec 11 09:35:45 crc kubenswrapper[4881]: I1211 09:35:45.527803 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-754v9"
Dec 11 09:35:55 crc kubenswrapper[4881]: I1211 09:35:55.525139 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-754v9"
Dec 11 09:35:55 crc kubenswrapper[4881]: I1211 09:35:55.572720 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-754v9"]
Dec 11 09:35:55 crc kubenswrapper[4881]: I1211 09:35:55.654240 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-754v9" podUID="61648b74-25cf-409a-939b-4d69483bd1a3" containerName="registry-server" containerID="cri-o://5b6ded7e862224fcff416ecf130799de17cf0ccb0fd120116b07bda743e7ef5a" gracePeriod=2
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.171020 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-754v9"
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.241069 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmsn8\" (UniqueName: \"kubernetes.io/projected/61648b74-25cf-409a-939b-4d69483bd1a3-kube-api-access-cmsn8\") pod \"61648b74-25cf-409a-939b-4d69483bd1a3\" (UID: \"61648b74-25cf-409a-939b-4d69483bd1a3\") "
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.241394 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61648b74-25cf-409a-939b-4d69483bd1a3-utilities\") pod \"61648b74-25cf-409a-939b-4d69483bd1a3\" (UID: \"61648b74-25cf-409a-939b-4d69483bd1a3\") "
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.241489 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61648b74-25cf-409a-939b-4d69483bd1a3-catalog-content\") pod \"61648b74-25cf-409a-939b-4d69483bd1a3\" (UID: \"61648b74-25cf-409a-939b-4d69483bd1a3\") "
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.242738 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61648b74-25cf-409a-939b-4d69483bd1a3-utilities" (OuterVolumeSpecName: "utilities") pod "61648b74-25cf-409a-939b-4d69483bd1a3" (UID: "61648b74-25cf-409a-939b-4d69483bd1a3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.251320 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61648b74-25cf-409a-939b-4d69483bd1a3-kube-api-access-cmsn8" (OuterVolumeSpecName: "kube-api-access-cmsn8") pod "61648b74-25cf-409a-939b-4d69483bd1a3" (UID: "61648b74-25cf-409a-939b-4d69483bd1a3"). InnerVolumeSpecName "kube-api-access-cmsn8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.294589 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61648b74-25cf-409a-939b-4d69483bd1a3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "61648b74-25cf-409a-939b-4d69483bd1a3" (UID: "61648b74-25cf-409a-939b-4d69483bd1a3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.344570 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61648b74-25cf-409a-939b-4d69483bd1a3-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.344616 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmsn8\" (UniqueName: \"kubernetes.io/projected/61648b74-25cf-409a-939b-4d69483bd1a3-kube-api-access-cmsn8\") on node \"crc\" DevicePath \"\""
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.344629 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61648b74-25cf-409a-939b-4d69483bd1a3-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.669670 4881 generic.go:334] "Generic (PLEG): container finished" podID="61648b74-25cf-409a-939b-4d69483bd1a3" containerID="5b6ded7e862224fcff416ecf130799de17cf0ccb0fd120116b07bda743e7ef5a" exitCode=0
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.669726 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-754v9"
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.669747 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-754v9" event={"ID":"61648b74-25cf-409a-939b-4d69483bd1a3","Type":"ContainerDied","Data":"5b6ded7e862224fcff416ecf130799de17cf0ccb0fd120116b07bda743e7ef5a"}
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.670462 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-754v9" event={"ID":"61648b74-25cf-409a-939b-4d69483bd1a3","Type":"ContainerDied","Data":"ca0486f61110208221ca25dd3d50fa66df39e8ee53b69cab43390719547be156"}
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.670514 4881 scope.go:117] "RemoveContainer" containerID="5b6ded7e862224fcff416ecf130799de17cf0ccb0fd120116b07bda743e7ef5a"
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.700207 4881 scope.go:117] "RemoveContainer" containerID="7c97c298edc3a4b5703c53d08a6dbd508490a63e86b21811b52802fe09061819"
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.706051 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-754v9"]
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.716508 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-754v9"]
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.748358 4881 scope.go:117] "RemoveContainer" containerID="2d4474ccb667abdbc7e57d0980bf0e9c5003b79e16293f724875b427f1ec0f57"
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.777515 4881 scope.go:117] "RemoveContainer" containerID="5b6ded7e862224fcff416ecf130799de17cf0ccb0fd120116b07bda743e7ef5a"
Dec 11 09:35:56 crc kubenswrapper[4881]: E1211 09:35:56.778049 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b6ded7e862224fcff416ecf130799de17cf0ccb0fd120116b07bda743e7ef5a\": container with ID starting with 5b6ded7e862224fcff416ecf130799de17cf0ccb0fd120116b07bda743e7ef5a not found: ID does not exist" containerID="5b6ded7e862224fcff416ecf130799de17cf0ccb0fd120116b07bda743e7ef5a"
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.778102 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b6ded7e862224fcff416ecf130799de17cf0ccb0fd120116b07bda743e7ef5a"} err="failed to get container status \"5b6ded7e862224fcff416ecf130799de17cf0ccb0fd120116b07bda743e7ef5a\": rpc error: code = NotFound desc = could not find container \"5b6ded7e862224fcff416ecf130799de17cf0ccb0fd120116b07bda743e7ef5a\": container with ID starting with 5b6ded7e862224fcff416ecf130799de17cf0ccb0fd120116b07bda743e7ef5a not found: ID does not exist"
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.778137 4881 scope.go:117] "RemoveContainer" containerID="7c97c298edc3a4b5703c53d08a6dbd508490a63e86b21811b52802fe09061819"
Dec 11 09:35:56 crc kubenswrapper[4881]: E1211 09:35:56.778571 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c97c298edc3a4b5703c53d08a6dbd508490a63e86b21811b52802fe09061819\": container with ID starting with 7c97c298edc3a4b5703c53d08a6dbd508490a63e86b21811b52802fe09061819 not found: ID does not exist" containerID="7c97c298edc3a4b5703c53d08a6dbd508490a63e86b21811b52802fe09061819"
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.778599 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c97c298edc3a4b5703c53d08a6dbd508490a63e86b21811b52802fe09061819"} err="failed to get container status \"7c97c298edc3a4b5703c53d08a6dbd508490a63e86b21811b52802fe09061819\": rpc error: code = NotFound desc = could not find container \"7c97c298edc3a4b5703c53d08a6dbd508490a63e86b21811b52802fe09061819\": container with ID starting with 7c97c298edc3a4b5703c53d08a6dbd508490a63e86b21811b52802fe09061819 not found: ID does not exist"
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.778617 4881 scope.go:117] "RemoveContainer" containerID="2d4474ccb667abdbc7e57d0980bf0e9c5003b79e16293f724875b427f1ec0f57"
Dec 11 09:35:56 crc kubenswrapper[4881]: E1211 09:35:56.778991 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d4474ccb667abdbc7e57d0980bf0e9c5003b79e16293f724875b427f1ec0f57\": container with ID starting with 2d4474ccb667abdbc7e57d0980bf0e9c5003b79e16293f724875b427f1ec0f57 not found: ID does not exist" containerID="2d4474ccb667abdbc7e57d0980bf0e9c5003b79e16293f724875b427f1ec0f57"
Dec 11 09:35:56 crc kubenswrapper[4881]: I1211 09:35:56.779056 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d4474ccb667abdbc7e57d0980bf0e9c5003b79e16293f724875b427f1ec0f57"} err="failed to get container status \"2d4474ccb667abdbc7e57d0980bf0e9c5003b79e16293f724875b427f1ec0f57\": rpc error: code = NotFound desc = could not find container \"2d4474ccb667abdbc7e57d0980bf0e9c5003b79e16293f724875b427f1ec0f57\": container with ID starting with 2d4474ccb667abdbc7e57d0980bf0e9c5003b79e16293f724875b427f1ec0f57 not found: ID does not exist"
Dec 11 09:35:57 crc kubenswrapper[4881]: I1211 09:35:57.019396 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61648b74-25cf-409a-939b-4d69483bd1a3" path="/var/lib/kubelet/pods/61648b74-25cf-409a-939b-4d69483bd1a3/volumes"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.063704 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"]
Dec 11 09:37:52 crc kubenswrapper[4881]: E1211 09:37:52.065197 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61648b74-25cf-409a-939b-4d69483bd1a3" containerName="extract-utilities"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.065215 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="61648b74-25cf-409a-939b-4d69483bd1a3" containerName="extract-utilities"
Dec 11 09:37:52 crc kubenswrapper[4881]: E1211 09:37:52.065250 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61648b74-25cf-409a-939b-4d69483bd1a3" containerName="extract-content"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.065278 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="61648b74-25cf-409a-939b-4d69483bd1a3" containerName="extract-content"
Dec 11 09:37:52 crc kubenswrapper[4881]: E1211 09:37:52.065324 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61648b74-25cf-409a-939b-4d69483bd1a3" containerName="registry-server"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.065356 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="61648b74-25cf-409a-939b-4d69483bd1a3" containerName="registry-server"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.065727 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="61648b74-25cf-409a-939b-4d69483bd1a3" containerName="registry-server"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.066891 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.070895 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.071074 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.072122 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-2j44r"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.074556 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.085754 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.170888 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.170938 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.170988 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.171203 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwstd\" (UniqueName: \"kubernetes.io/projected/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-kube-api-access-vwstd\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.171223 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.171251 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.171271 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.171321 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.171586 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-config-data\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.274022 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-config-data\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.274106 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.274127 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.274154 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.274392 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwstd\" (UniqueName: \"kubernetes.io/projected/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-kube-api-access-vwstd\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.274420 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.274460 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.274489 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.274524 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.274721 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.275025 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.275441 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.277395 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.278299 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-config-data\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.280834 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.282958 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.287026 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.293732 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwstd\" (UniqueName: \"kubernetes.io/projected/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-kube-api-access-vwstd\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.310252 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.391900 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.952600 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Dec 11 09:37:52 crc kubenswrapper[4881]: I1211 09:37:52.962782 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 11 09:37:53 crc kubenswrapper[4881]: I1211 09:37:53.934278 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"44483fe0-748e-4e0e-9591-f5c14c4cd3f8","Type":"ContainerStarted","Data":"4d621f27cca3dccf655cd1c0f6fea8d3326ae0bdd62c14a4a6128cc5225d5257"}
Dec 11 09:37:59 crc kubenswrapper[4881]: I1211 09:37:59.398845 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 09:37:59 crc kubenswrapper[4881]: I1211 09:37:59.399445 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 09:38:24 crc kubenswrapper[4881]: I1211 09:38:24.485102 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-5f6c547b6c-rjk9h" podUID="910014af-7b9e-49b8-99e3-b80a15d72faf" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502"
Dec 11 09:38:29 crc kubenswrapper[4881]: I1211 09:38:29.396829 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 09:38:29 crc kubenswrapper[4881]: I1211 09:38:29.397119 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 09:38:36 crc kubenswrapper[4881]: E1211 09:38:36.903496 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified"
Dec 11 09:38:36 crc kubenswrapper[4881]: E1211 09:38:36.905129 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vwstd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(44483fe0-748e-4e0e-9591-f5c14c4cd3f8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 11 09:38:36 crc kubenswrapper[4881]: E1211 09:38:36.906503 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" podUID="44483fe0-748e-4e0e-9591-f5c14c4cd3f8"
Dec 11 09:38:37 crc kubenswrapper[4881]: E1211 09:38:37.475812 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="44483fe0-748e-4e0e-9591-f5c14c4cd3f8"
Dec 11 09:38:51 crc kubenswrapper[4881]: I1211 09:38:51.738604 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Dec 11 09:38:55 crc kubenswrapper[4881]: I1211 09:38:55.699468 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"44483fe0-748e-4e0e-9591-f5c14c4cd3f8","Type":"ContainerStarted","Data":"0d43a46784be8743eb464e585024988ef5557f3ae744b53e88854c3e5939f70f"}
Dec 11 09:38:55 crc kubenswrapper[4881]: I1211 09:38:55.721099 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=6.947536641 podStartE2EDuration="1m5.721081905s" podCreationTimestamp="2025-12-11 09:37:50 +0000 UTC" firstStartedPulling="2025-12-11 09:37:52.962476392 +0000 UTC m=+4921.339845099" lastFinishedPulling="2025-12-11 09:38:51.736021676 +0000 UTC m=+4980.113390363" observedRunningTime="2025-12-11 09:38:55.719814814 +0000 UTC m=+4984.097183511" watchObservedRunningTime="2025-12-11 09:38:55.721081905 +0000 UTC m=+4984.098450592"
Dec 11 09:38:59 crc kubenswrapper[4881]: I1211 09:38:59.397634 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 09:38:59 crc kubenswrapper[4881]: I1211 09:38:59.398248 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 09:38:59 crc kubenswrapper[4881]: I1211 09:38:59.398307 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh"
Dec 11 09:38:59 crc kubenswrapper[4881]: I1211 09:38:59.399427 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"337643c0cd1de28485c17857df37f2bfd89fb1fbf082effe9ba1f659e31f1340"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 11 09:38:59 crc kubenswrapper[4881]: I1211 09:38:59.399494 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://337643c0cd1de28485c17857df37f2bfd89fb1fbf082effe9ba1f659e31f1340" gracePeriod=600
Dec 11 09:39:01 crc kubenswrapper[4881]: I1211 09:39:01.775939 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="337643c0cd1de28485c17857df37f2bfd89fb1fbf082effe9ba1f659e31f1340" exitCode=0
Dec 11 09:39:01 crc kubenswrapper[4881]: I1211 09:39:01.776209 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"337643c0cd1de28485c17857df37f2bfd89fb1fbf082effe9ba1f659e31f1340"}
Dec 11 09:39:01 crc kubenswrapper[4881]: I1211 09:39:01.776667 4881 scope.go:117] "RemoveContainer" containerID="1badfedc757c795081819123d86bb178d21e88e179f66cb3bbd3bb4b81192bc0"
Dec 11 09:39:02 crc kubenswrapper[4881]: I1211 09:39:02.791261 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29"}
Dec 11 09:41:17 crc kubenswrapper[4881]: I1211 09:41:17.978851 4881 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.583063465s: [/var/lib/containers/storage/overlay/902003d80a5cd366c2e66ec9732eb2dbd08a60313a46aa4754acc63dfcf6f1f1/diff /var/log/pods/openshift-nmstate_nmstate-console-plugin-6ff7998486-nj8ls_809482cd-c05d-41df-96db-84149e666743/nmstate-console-plugin/0.log]; will not log again for this container unless duration exceeds 2s
Dec 11 09:41:17 crc kubenswrapper[4881]: I1211 09:41:17.982224 4881 trace.go:236] Trace[991492074]: "Calculate volume metrics of config-data-generated for pod openstack/openstack-galera-0" (11-Dec-2025 09:41:16.106) (total time: 1871ms):
Dec 11 09:41:17 crc kubenswrapper[4881]: Trace[991492074]: [1.87119674s] [1.87119674s] END
Dec 11 09:41:17 crc kubenswrapper[4881]: I1211 09:41:17.982228 4881 trace.go:236] Trace[733762361]: "Calculate volume metrics of config for pod openshift-cluster-machine-approver/machine-approver-56656f9798-mdqnb" (11-Dec-2025 09:41:16.598) (total time: 1378ms):
Dec 11 09:41:17 crc kubenswrapper[4881]: Trace[733762361]: [1.378491567s] [1.378491567s] END
Dec 11 09:41:17 crc kubenswrapper[4881]: I1211 09:41:17.989014 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="70402eec-968d-4ceb-b259-5e2508ee21a0" containerName="galera" probeResult="failure" output="command timed out"
Dec 11 09:41:17 crc kubenswrapper[4881]: I1211 09:41:17.989939 4881 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="70402eec-968d-4ceb-b259-5e2508ee21a0" containerName="galera" probeResult="failure" output="command timed out"
Dec 11 09:41:29 crc kubenswrapper[4881]: I1211 09:41:29.396947 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 09:41:29 crc kubenswrapper[4881]: I1211 09:41:29.397617 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 09:41:59 crc kubenswrapper[4881]: I1211 09:41:59.396740 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 09:41:59 crc kubenswrapper[4881]: I1211 09:41:59.397206 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 09:42:29 crc kubenswrapper[4881]: I1211 09:42:29.396938 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 09:42:29 crc kubenswrapper[4881]: I1211 09:42:29.398820 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 09:42:29 crc kubenswrapper[4881]: I1211 09:42:29.399357 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh"
Dec 11 09:42:29 crc kubenswrapper[4881]: I1211 09:42:29.401323 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 11 09:42:29 crc kubenswrapper[4881]: I1211 09:42:29.401909 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29" gracePeriod=600
Dec 11 09:42:29 crc kubenswrapper[4881]: I1211 09:42:29.968777 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29" exitCode=0
Dec 11 09:42:29 crc kubenswrapper[4881]: I1211 09:42:29.968874 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29"}
Dec 11 09:42:29 crc kubenswrapper[4881]: I1211 09:42:29.969975 4881 scope.go:117] "RemoveContainer" containerID="337643c0cd1de28485c17857df37f2bfd89fb1fbf082effe9ba1f659e31f1340"
Dec 11 09:42:30 crc kubenswrapper[4881]: E1211 09:42:30.152530 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:42:30 crc kubenswrapper[4881]: I1211 09:42:30.995806 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29"
Dec 11 09:42:30 crc kubenswrapper[4881]: E1211 09:42:30.996406 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:42:31 crc kubenswrapper[4881]: I1211 09:42:31.319630 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qvtgk"]
Dec 11 09:42:31 crc kubenswrapper[4881]: I1211 09:42:31.327520 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qvtgk"
Dec 11 09:42:31 crc kubenswrapper[4881]: I1211 09:42:31.428982 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qvtgk"]
Dec 11 09:42:31 crc kubenswrapper[4881]: I1211 09:42:31.476648 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-catalog-content\") pod \"community-operators-qvtgk\" (UID: \"e8f6d1a2-0a10-4a88-8543-1e13853f64f3\") " pod="openshift-marketplace/community-operators-qvtgk"
Dec 11 09:42:31 crc kubenswrapper[4881]: I1211 09:42:31.476722 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljw4v\" (UniqueName: \"kubernetes.io/projected/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-kube-api-access-ljw4v\") pod \"community-operators-qvtgk\" (UID: \"e8f6d1a2-0a10-4a88-8543-1e13853f64f3\") " pod="openshift-marketplace/community-operators-qvtgk"
Dec 11 09:42:31 crc kubenswrapper[4881]: I1211 09:42:31.476978 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-utilities\") pod \"community-operators-qvtgk\" (UID: \"e8f6d1a2-0a10-4a88-8543-1e13853f64f3\") " pod="openshift-marketplace/community-operators-qvtgk"
Dec 11 09:42:31 crc kubenswrapper[4881]: I1211 09:42:31.580626 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-catalog-content\") pod \"community-operators-qvtgk\" (UID: \"e8f6d1a2-0a10-4a88-8543-1e13853f64f3\") " pod="openshift-marketplace/community-operators-qvtgk"
Dec 11 09:42:31 crc kubenswrapper[4881]: I1211 09:42:31.580687 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljw4v\" (UniqueName: \"kubernetes.io/projected/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-kube-api-access-ljw4v\") pod \"community-operators-qvtgk\" (UID: \"e8f6d1a2-0a10-4a88-8543-1e13853f64f3\") " pod="openshift-marketplace/community-operators-qvtgk"
Dec 11 09:42:31 crc kubenswrapper[4881]: I1211 09:42:31.580813 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-utilities\") pod \"community-operators-qvtgk\" (UID: \"e8f6d1a2-0a10-4a88-8543-1e13853f64f3\") " pod="openshift-marketplace/community-operators-qvtgk"
Dec 11 09:42:31 crc kubenswrapper[4881]: I1211 09:42:31.582373 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-catalog-content\") pod \"community-operators-qvtgk\" (UID: \"e8f6d1a2-0a10-4a88-8543-1e13853f64f3\") " pod="openshift-marketplace/community-operators-qvtgk"
Dec 11 09:42:31 crc kubenswrapper[4881]: I1211 09:42:31.582637 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-utilities\") pod \"community-operators-qvtgk\" (UID: \"e8f6d1a2-0a10-4a88-8543-1e13853f64f3\") " pod="openshift-marketplace/community-operators-qvtgk"
Dec 11 09:42:31 crc kubenswrapper[4881]: I1211 09:42:31.894710 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljw4v\" (UniqueName: \"kubernetes.io/projected/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-kube-api-access-ljw4v\") pod \"community-operators-qvtgk\" (UID: \"e8f6d1a2-0a10-4a88-8543-1e13853f64f3\") " pod="openshift-marketplace/community-operators-qvtgk"
Dec 11 09:42:31 crc kubenswrapper[4881]: I1211 09:42:31.963214 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qvtgk"
Dec 11 09:42:32 crc kubenswrapper[4881]: I1211 09:42:32.893387 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qvtgk"]
Dec 11 09:42:33 crc kubenswrapper[4881]: I1211 09:42:33.021868 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qvtgk" event={"ID":"e8f6d1a2-0a10-4a88-8543-1e13853f64f3","Type":"ContainerStarted","Data":"03960a7f6c7625998f72d4c9716f99d28afd5eef09b5cdab65a611699ffce741"}
Dec 11 09:42:34 crc kubenswrapper[4881]: I1211 09:42:34.034502 4881 generic.go:334] "Generic (PLEG): container finished" podID="e8f6d1a2-0a10-4a88-8543-1e13853f64f3" containerID="48e825a01365af28ed3623b811a422ae97761ae811faf9919ee1d71648e9513e" exitCode=0
Dec 11 09:42:34 crc kubenswrapper[4881]: I1211 09:42:34.034578 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qvtgk" event={"ID":"e8f6d1a2-0a10-4a88-8543-1e13853f64f3","Type":"ContainerDied","Data":"48e825a01365af28ed3623b811a422ae97761ae811faf9919ee1d71648e9513e"}
Dec 11 09:42:36 crc kubenswrapper[4881]: I1211 09:42:36.107138 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qvtgk" event={"ID":"e8f6d1a2-0a10-4a88-8543-1e13853f64f3","Type":"ContainerStarted","Data":"f42786682b72a7cc857a70e3dfcdef1f943ec1b30ed0ea572c9060868892366b"}
Dec 11 09:42:37 crc kubenswrapper[4881]: I1211 09:42:37.124898 4881 generic.go:334] "Generic (PLEG): container finished" podID="e8f6d1a2-0a10-4a88-8543-1e13853f64f3" containerID="f42786682b72a7cc857a70e3dfcdef1f943ec1b30ed0ea572c9060868892366b" exitCode=0
Dec 11 09:42:37 crc kubenswrapper[4881]: I1211 09:42:37.125165 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qvtgk" event={"ID":"e8f6d1a2-0a10-4a88-8543-1e13853f64f3","Type":"ContainerDied","Data":"f42786682b72a7cc857a70e3dfcdef1f943ec1b30ed0ea572c9060868892366b"}
Dec 11 09:42:39 crc kubenswrapper[4881]: I1211 09:42:39.146379 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qvtgk" event={"ID":"e8f6d1a2-0a10-4a88-8543-1e13853f64f3","Type":"ContainerStarted","Data":"9375b4272f4e077f66f2adc02d69c5110fb704dceccdddb10e530edd21688341"}
Dec 11 09:42:39 crc kubenswrapper[4881]: I1211 09:42:39.170581 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qvtgk" podStartSLOduration=3.805845619 podStartE2EDuration="8.170286555s" podCreationTimestamp="2025-12-11 09:42:31 +0000 UTC" firstStartedPulling="2025-12-11 09:42:34.037762505 +0000 UTC m=+5202.415131202" lastFinishedPulling="2025-12-11 09:42:38.402203441 +0000 UTC m=+5206.779572138" observedRunningTime="2025-12-11 09:42:39.166206725 +0000 UTC m=+5207.543575432" watchObservedRunningTime="2025-12-11 09:42:39.170286555 +0000 UTC m=+5207.547655252"
Dec 11 09:42:41 crc kubenswrapper[4881]: I1211 09:42:41.963993 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qvtgk"
Dec 11 09:42:41 crc kubenswrapper[4881]: I1211 09:42:41.964663 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qvtgk"
Dec 11 09:42:42 crc kubenswrapper[4881]: I1211 09:42:42.005718 4881 scope.go:117] "RemoveContainer"
containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29" Dec 11 09:42:42 crc kubenswrapper[4881]: E1211 09:42:42.006102 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:42:42 crc kubenswrapper[4881]: I1211 09:42:42.041224 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qvtgk" Dec 11 09:42:52 crc kubenswrapper[4881]: I1211 09:42:52.015116 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qvtgk" Dec 11 09:42:52 crc kubenswrapper[4881]: I1211 09:42:52.070221 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qvtgk"] Dec 11 09:42:52 crc kubenswrapper[4881]: I1211 09:42:52.286020 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qvtgk" podUID="e8f6d1a2-0a10-4a88-8543-1e13853f64f3" containerName="registry-server" containerID="cri-o://9375b4272f4e077f66f2adc02d69c5110fb704dceccdddb10e530edd21688341" gracePeriod=2 Dec 11 09:42:52 crc kubenswrapper[4881]: I1211 09:42:52.947514 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qvtgk" Dec 11 09:42:52 crc kubenswrapper[4881]: I1211 09:42:52.958010 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-catalog-content\") pod \"e8f6d1a2-0a10-4a88-8543-1e13853f64f3\" (UID: \"e8f6d1a2-0a10-4a88-8543-1e13853f64f3\") " Dec 11 09:42:52 crc kubenswrapper[4881]: I1211 09:42:52.958412 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ljw4v\" (UniqueName: \"kubernetes.io/projected/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-kube-api-access-ljw4v\") pod \"e8f6d1a2-0a10-4a88-8543-1e13853f64f3\" (UID: \"e8f6d1a2-0a10-4a88-8543-1e13853f64f3\") " Dec 11 09:42:52 crc kubenswrapper[4881]: I1211 09:42:52.958472 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-utilities\") pod \"e8f6d1a2-0a10-4a88-8543-1e13853f64f3\" (UID: \"e8f6d1a2-0a10-4a88-8543-1e13853f64f3\") " Dec 11 09:42:52 crc kubenswrapper[4881]: I1211 09:42:52.958969 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-utilities" (OuterVolumeSpecName: "utilities") pod "e8f6d1a2-0a10-4a88-8543-1e13853f64f3" (UID: "e8f6d1a2-0a10-4a88-8543-1e13853f64f3"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:42:52 crc kubenswrapper[4881]: I1211 09:42:52.959493 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:42:52 crc kubenswrapper[4881]: I1211 09:42:52.982600 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-kube-api-access-ljw4v" (OuterVolumeSpecName: "kube-api-access-ljw4v") pod "e8f6d1a2-0a10-4a88-8543-1e13853f64f3" (UID: "e8f6d1a2-0a10-4a88-8543-1e13853f64f3"). InnerVolumeSpecName "kube-api-access-ljw4v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.069956 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ljw4v\" (UniqueName: \"kubernetes.io/projected/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-kube-api-access-ljw4v\") on node \"crc\" DevicePath \"\"" Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.140814 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e8f6d1a2-0a10-4a88-8543-1e13853f64f3" (UID: "e8f6d1a2-0a10-4a88-8543-1e13853f64f3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.172520 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8f6d1a2-0a10-4a88-8543-1e13853f64f3-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.299482 4881 generic.go:334] "Generic (PLEG): container finished" podID="e8f6d1a2-0a10-4a88-8543-1e13853f64f3" containerID="9375b4272f4e077f66f2adc02d69c5110fb704dceccdddb10e530edd21688341" exitCode=0 Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.299522 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qvtgk" event={"ID":"e8f6d1a2-0a10-4a88-8543-1e13853f64f3","Type":"ContainerDied","Data":"9375b4272f4e077f66f2adc02d69c5110fb704dceccdddb10e530edd21688341"} Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.299828 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qvtgk" event={"ID":"e8f6d1a2-0a10-4a88-8543-1e13853f64f3","Type":"ContainerDied","Data":"03960a7f6c7625998f72d4c9716f99d28afd5eef09b5cdab65a611699ffce741"} Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.299851 4881 scope.go:117] "RemoveContainer" containerID="9375b4272f4e077f66f2adc02d69c5110fb704dceccdddb10e530edd21688341" Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.299553 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qvtgk" Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.356987 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qvtgk"] Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.365242 4881 scope.go:117] "RemoveContainer" containerID="f42786682b72a7cc857a70e3dfcdef1f943ec1b30ed0ea572c9060868892366b" Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.374529 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qvtgk"] Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.391746 4881 scope.go:117] "RemoveContainer" containerID="48e825a01365af28ed3623b811a422ae97761ae811faf9919ee1d71648e9513e" Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.457971 4881 scope.go:117] "RemoveContainer" containerID="9375b4272f4e077f66f2adc02d69c5110fb704dceccdddb10e530edd21688341" Dec 11 09:42:53 crc kubenswrapper[4881]: E1211 09:42:53.459399 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9375b4272f4e077f66f2adc02d69c5110fb704dceccdddb10e530edd21688341\": container with ID starting with 9375b4272f4e077f66f2adc02d69c5110fb704dceccdddb10e530edd21688341 not found: ID does not exist" containerID="9375b4272f4e077f66f2adc02d69c5110fb704dceccdddb10e530edd21688341" Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.459453 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9375b4272f4e077f66f2adc02d69c5110fb704dceccdddb10e530edd21688341"} err="failed to get container status \"9375b4272f4e077f66f2adc02d69c5110fb704dceccdddb10e530edd21688341\": rpc error: code = NotFound desc = could not find container \"9375b4272f4e077f66f2adc02d69c5110fb704dceccdddb10e530edd21688341\": container with ID starting with 9375b4272f4e077f66f2adc02d69c5110fb704dceccdddb10e530edd21688341 not found: ID does not exist" Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.459484 4881 scope.go:117] "RemoveContainer" containerID="f42786682b72a7cc857a70e3dfcdef1f943ec1b30ed0ea572c9060868892366b" Dec 11 09:42:53 crc kubenswrapper[4881]: E1211 09:42:53.460002 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f42786682b72a7cc857a70e3dfcdef1f943ec1b30ed0ea572c9060868892366b\": container with ID starting with f42786682b72a7cc857a70e3dfcdef1f943ec1b30ed0ea572c9060868892366b not found: ID does not exist" containerID="f42786682b72a7cc857a70e3dfcdef1f943ec1b30ed0ea572c9060868892366b" Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.460041 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f42786682b72a7cc857a70e3dfcdef1f943ec1b30ed0ea572c9060868892366b"} err="failed to get container status \"f42786682b72a7cc857a70e3dfcdef1f943ec1b30ed0ea572c9060868892366b\": rpc error: code = NotFound desc = could not find container \"f42786682b72a7cc857a70e3dfcdef1f943ec1b30ed0ea572c9060868892366b\": container with ID starting with f42786682b72a7cc857a70e3dfcdef1f943ec1b30ed0ea572c9060868892366b not found: ID does not exist" Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.460062 4881 scope.go:117] "RemoveContainer" containerID="48e825a01365af28ed3623b811a422ae97761ae811faf9919ee1d71648e9513e" Dec 11 09:42:53 crc kubenswrapper[4881]: E1211 09:42:53.461643 4881 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"48e825a01365af28ed3623b811a422ae97761ae811faf9919ee1d71648e9513e\": container with ID starting with 48e825a01365af28ed3623b811a422ae97761ae811faf9919ee1d71648e9513e not found: ID does not exist" containerID="48e825a01365af28ed3623b811a422ae97761ae811faf9919ee1d71648e9513e" Dec 11 09:42:53 crc kubenswrapper[4881]: I1211 09:42:53.461678 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48e825a01365af28ed3623b811a422ae97761ae811faf9919ee1d71648e9513e"} err="failed to get container status \"48e825a01365af28ed3623b811a422ae97761ae811faf9919ee1d71648e9513e\": rpc error: code = NotFound desc = could not find container \"48e825a01365af28ed3623b811a422ae97761ae811faf9919ee1d71648e9513e\": container with ID starting with 48e825a01365af28ed3623b811a422ae97761ae811faf9919ee1d71648e9513e not found: ID does not exist" Dec 11 09:42:54 crc kubenswrapper[4881]: I1211 09:42:54.005675 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29" Dec 11 09:42:54 crc kubenswrapper[4881]: E1211 09:42:54.006223 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:42:55 crc kubenswrapper[4881]: I1211 09:42:55.020444 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8f6d1a2-0a10-4a88-8543-1e13853f64f3" path="/var/lib/kubelet/pods/e8f6d1a2-0a10-4a88-8543-1e13853f64f3/volumes" Dec 11 09:43:08 crc kubenswrapper[4881]: I1211 09:43:08.005696 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29" Dec 11 09:43:08 crc kubenswrapper[4881]: E1211 09:43:08.006636 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:43:22 crc kubenswrapper[4881]: I1211 09:43:22.006429 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29" Dec 11 09:43:22 crc kubenswrapper[4881]: E1211 09:43:22.007603 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:43:34 crc kubenswrapper[4881]: I1211 09:43:34.006158 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29" Dec 11 09:43:34 crc kubenswrapper[4881]: E1211 09:43:34.007026 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.005826 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29" Dec 11 09:43:48 crc kubenswrapper[4881]: E1211 09:43:48.006687 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.259649 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-b44g8"] Dec 11 09:43:48 crc kubenswrapper[4881]: E1211 09:43:48.260819 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8f6d1a2-0a10-4a88-8543-1e13853f64f3" containerName="extract-utilities" Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.260856 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8f6d1a2-0a10-4a88-8543-1e13853f64f3" containerName="extract-utilities" Dec 11 09:43:48 crc kubenswrapper[4881]: E1211 09:43:48.260891 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8f6d1a2-0a10-4a88-8543-1e13853f64f3" containerName="extract-content" Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.260899 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8f6d1a2-0a10-4a88-8543-1e13853f64f3" containerName="extract-content" Dec 11 09:43:48 crc kubenswrapper[4881]: E1211 09:43:48.260908 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8f6d1a2-0a10-4a88-8543-1e13853f64f3" containerName="registry-server" Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.260916 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8f6d1a2-0a10-4a88-8543-1e13853f64f3" containerName="registry-server" Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.261384 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8f6d1a2-0a10-4a88-8543-1e13853f64f3" containerName="registry-server" Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.263894 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b44g8" Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.272084 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b44g8"] Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.402627 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44rjj\" (UniqueName: \"kubernetes.io/projected/131bdc29-71ea-417f-8d16-d89ff120fc23-kube-api-access-44rjj\") pod \"redhat-operators-b44g8\" (UID: \"131bdc29-71ea-417f-8d16-d89ff120fc23\") " pod="openshift-marketplace/redhat-operators-b44g8" Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.402940 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/131bdc29-71ea-417f-8d16-d89ff120fc23-catalog-content\") pod \"redhat-operators-b44g8\" (UID: \"131bdc29-71ea-417f-8d16-d89ff120fc23\") " pod="openshift-marketplace/redhat-operators-b44g8" Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.403017 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/131bdc29-71ea-417f-8d16-d89ff120fc23-utilities\") pod \"redhat-operators-b44g8\" (UID: \"131bdc29-71ea-417f-8d16-d89ff120fc23\") " pod="openshift-marketplace/redhat-operators-b44g8" Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.505951 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44rjj\" (UniqueName: \"kubernetes.io/projected/131bdc29-71ea-417f-8d16-d89ff120fc23-kube-api-access-44rjj\") pod \"redhat-operators-b44g8\" (UID: \"131bdc29-71ea-417f-8d16-d89ff120fc23\") " pod="openshift-marketplace/redhat-operators-b44g8" Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.506121 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/131bdc29-71ea-417f-8d16-d89ff120fc23-catalog-content\") pod \"redhat-operators-b44g8\" (UID: \"131bdc29-71ea-417f-8d16-d89ff120fc23\") " pod="openshift-marketplace/redhat-operators-b44g8" Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.506171 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/131bdc29-71ea-417f-8d16-d89ff120fc23-utilities\") pod \"redhat-operators-b44g8\" (UID: \"131bdc29-71ea-417f-8d16-d89ff120fc23\") " pod="openshift-marketplace/redhat-operators-b44g8" Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.506769 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/131bdc29-71ea-417f-8d16-d89ff120fc23-catalog-content\") pod \"redhat-operators-b44g8\" (UID: \"131bdc29-71ea-417f-8d16-d89ff120fc23\") " pod="openshift-marketplace/redhat-operators-b44g8" Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.506769 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/131bdc29-71ea-417f-8d16-d89ff120fc23-utilities\") pod \"redhat-operators-b44g8\" (UID: \"131bdc29-71ea-417f-8d16-d89ff120fc23\") " pod="openshift-marketplace/redhat-operators-b44g8" Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.526648 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-44rjj\" (UniqueName: \"kubernetes.io/projected/131bdc29-71ea-417f-8d16-d89ff120fc23-kube-api-access-44rjj\") pod \"redhat-operators-b44g8\" (UID: \"131bdc29-71ea-417f-8d16-d89ff120fc23\") " pod="openshift-marketplace/redhat-operators-b44g8" Dec 11 09:43:48 crc kubenswrapper[4881]: I1211 09:43:48.586140 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b44g8" Dec 11 09:43:50 crc kubenswrapper[4881]: I1211 09:43:50.082445 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b44g8"] Dec 11 09:43:50 crc kubenswrapper[4881]: I1211 09:43:50.240368 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b44g8" event={"ID":"131bdc29-71ea-417f-8d16-d89ff120fc23","Type":"ContainerStarted","Data":"90e37f02af1d66143b4a8958481cd69f2f16729881091c66defe47f8f20bc6d7"} Dec 11 09:43:51 crc kubenswrapper[4881]: I1211 09:43:51.251647 4881 generic.go:334] "Generic (PLEG): container finished" podID="131bdc29-71ea-417f-8d16-d89ff120fc23" containerID="eb571444bcbdeb7b4c0f5b6d57feaacd8482414c69a4bb988d322d61a2808a6c" exitCode=0 Dec 11 09:43:51 crc kubenswrapper[4881]: I1211 09:43:51.251961 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b44g8" event={"ID":"131bdc29-71ea-417f-8d16-d89ff120fc23","Type":"ContainerDied","Data":"eb571444bcbdeb7b4c0f5b6d57feaacd8482414c69a4bb988d322d61a2808a6c"} Dec 11 09:43:51 crc kubenswrapper[4881]: I1211 09:43:51.255464 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 09:43:53 crc kubenswrapper[4881]: I1211 09:43:53.275097 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b44g8" event={"ID":"131bdc29-71ea-417f-8d16-d89ff120fc23","Type":"ContainerStarted","Data":"b82e9c218bc5b934a5b5fa841b40144dd1e278c77905770ed77664b38aad47c4"} Dec 11 09:44:02 crc kubenswrapper[4881]: I1211 09:44:02.417257 4881 generic.go:334] "Generic (PLEG): container finished" podID="131bdc29-71ea-417f-8d16-d89ff120fc23" containerID="b82e9c218bc5b934a5b5fa841b40144dd1e278c77905770ed77664b38aad47c4" exitCode=0 Dec 11 09:44:02 crc kubenswrapper[4881]: I1211 09:44:02.417311 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b44g8" event={"ID":"131bdc29-71ea-417f-8d16-d89ff120fc23","Type":"ContainerDied","Data":"b82e9c218bc5b934a5b5fa841b40144dd1e278c77905770ed77664b38aad47c4"} Dec 11 09:44:03 crc kubenswrapper[4881]: I1211 09:44:03.020701 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29" Dec 11 09:44:03 crc kubenswrapper[4881]: E1211 09:44:03.021106 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:44:03 crc kubenswrapper[4881]: I1211 09:44:03.435502 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b44g8" 
event={"ID":"131bdc29-71ea-417f-8d16-d89ff120fc23","Type":"ContainerStarted","Data":"f69c9c9af0fce944ad52312948d6f8d468de723a628af4f53e2f3738c2befe04"} Dec 11 09:44:03 crc kubenswrapper[4881]: I1211 09:44:03.462044 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-b44g8" podStartSLOduration=3.659432763 podStartE2EDuration="15.462010644s" podCreationTimestamp="2025-12-11 09:43:48 +0000 UTC" firstStartedPulling="2025-12-11 09:43:51.255224477 +0000 UTC m=+5279.632593174" lastFinishedPulling="2025-12-11 09:44:03.057802358 +0000 UTC m=+5291.435171055" observedRunningTime="2025-12-11 09:44:03.456184171 +0000 UTC m=+5291.833552878" watchObservedRunningTime="2025-12-11 09:44:03.462010644 +0000 UTC m=+5291.839379391" Dec 11 09:44:08 crc kubenswrapper[4881]: I1211 09:44:08.586809 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-b44g8" Dec 11 09:44:08 crc kubenswrapper[4881]: I1211 09:44:08.587512 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-b44g8" Dec 11 09:44:09 crc kubenswrapper[4881]: I1211 09:44:09.643356 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-b44g8" podUID="131bdc29-71ea-417f-8d16-d89ff120fc23" containerName="registry-server" probeResult="failure" output=< Dec 11 09:44:09 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 09:44:09 crc kubenswrapper[4881]: > Dec 11 09:44:18 crc kubenswrapper[4881]: I1211 09:44:18.005602 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29" Dec 11 09:44:18 crc kubenswrapper[4881]: E1211 09:44:18.006723 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:44:19 crc kubenswrapper[4881]: I1211 09:44:19.643964 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-b44g8" podUID="131bdc29-71ea-417f-8d16-d89ff120fc23" containerName="registry-server" probeResult="failure" output=< Dec 11 09:44:19 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 09:44:19 crc kubenswrapper[4881]: > Dec 11 09:44:28 crc kubenswrapper[4881]: I1211 09:44:28.672344 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-b44g8" Dec 11 09:44:28 crc kubenswrapper[4881]: I1211 09:44:28.739165 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-b44g8" Dec 11 09:44:28 crc kubenswrapper[4881]: I1211 09:44:28.917874 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b44g8"] Dec 11 09:44:29 crc kubenswrapper[4881]: I1211 09:44:29.006129 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29" Dec 11 09:44:29 crc kubenswrapper[4881]: E1211 09:44:29.006616 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:44:29 crc kubenswrapper[4881]: I1211 09:44:29.720911 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-b44g8" podUID="131bdc29-71ea-417f-8d16-d89ff120fc23" containerName="registry-server" containerID="cri-o://f69c9c9af0fce944ad52312948d6f8d468de723a628af4f53e2f3738c2befe04" gracePeriod=2 Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.452086 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b44g8" Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.512642 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44rjj\" (UniqueName: \"kubernetes.io/projected/131bdc29-71ea-417f-8d16-d89ff120fc23-kube-api-access-44rjj\") pod \"131bdc29-71ea-417f-8d16-d89ff120fc23\" (UID: \"131bdc29-71ea-417f-8d16-d89ff120fc23\") " Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.513212 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/131bdc29-71ea-417f-8d16-d89ff120fc23-catalog-content\") pod \"131bdc29-71ea-417f-8d16-d89ff120fc23\" (UID: \"131bdc29-71ea-417f-8d16-d89ff120fc23\") " Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.524533 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/131bdc29-71ea-417f-8d16-d89ff120fc23-kube-api-access-44rjj" (OuterVolumeSpecName: "kube-api-access-44rjj") pod "131bdc29-71ea-417f-8d16-d89ff120fc23" (UID: "131bdc29-71ea-417f-8d16-d89ff120fc23"). InnerVolumeSpecName "kube-api-access-44rjj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.616763 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/131bdc29-71ea-417f-8d16-d89ff120fc23-utilities\") pod \"131bdc29-71ea-417f-8d16-d89ff120fc23\" (UID: \"131bdc29-71ea-417f-8d16-d89ff120fc23\") " Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.617649 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44rjj\" (UniqueName: \"kubernetes.io/projected/131bdc29-71ea-417f-8d16-d89ff120fc23-kube-api-access-44rjj\") on node \"crc\" DevicePath \"\"" Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.618307 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/131bdc29-71ea-417f-8d16-d89ff120fc23-utilities" (OuterVolumeSpecName: "utilities") pod "131bdc29-71ea-417f-8d16-d89ff120fc23" (UID: "131bdc29-71ea-417f-8d16-d89ff120fc23"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.653939 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/131bdc29-71ea-417f-8d16-d89ff120fc23-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "131bdc29-71ea-417f-8d16-d89ff120fc23" (UID: "131bdc29-71ea-417f-8d16-d89ff120fc23"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.718975 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/131bdc29-71ea-417f-8d16-d89ff120fc23-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.719008 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/131bdc29-71ea-417f-8d16-d89ff120fc23-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.733652 4881 generic.go:334] "Generic (PLEG): container finished" podID="131bdc29-71ea-417f-8d16-d89ff120fc23" containerID="f69c9c9af0fce944ad52312948d6f8d468de723a628af4f53e2f3738c2befe04" exitCode=0 Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.733704 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b44g8" event={"ID":"131bdc29-71ea-417f-8d16-d89ff120fc23","Type":"ContainerDied","Data":"f69c9c9af0fce944ad52312948d6f8d468de723a628af4f53e2f3738c2befe04"} Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.733744 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b44g8" event={"ID":"131bdc29-71ea-417f-8d16-d89ff120fc23","Type":"ContainerDied","Data":"90e37f02af1d66143b4a8958481cd69f2f16729881091c66defe47f8f20bc6d7"} Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.733767 4881 scope.go:117] "RemoveContainer" containerID="f69c9c9af0fce944ad52312948d6f8d468de723a628af4f53e2f3738c2befe04" Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.733773 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b44g8" Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.765943 4881 scope.go:117] "RemoveContainer" containerID="b82e9c218bc5b934a5b5fa841b40144dd1e278c77905770ed77664b38aad47c4" Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.783955 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b44g8"] Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.797151 4881 scope.go:117] "RemoveContainer" containerID="eb571444bcbdeb7b4c0f5b6d57feaacd8482414c69a4bb988d322d61a2808a6c" Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.809097 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-b44g8"] Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.880978 4881 scope.go:117] "RemoveContainer" containerID="f69c9c9af0fce944ad52312948d6f8d468de723a628af4f53e2f3738c2befe04" Dec 11 09:44:30 crc kubenswrapper[4881]: E1211 09:44:30.882491 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f69c9c9af0fce944ad52312948d6f8d468de723a628af4f53e2f3738c2befe04\": container with ID starting with f69c9c9af0fce944ad52312948d6f8d468de723a628af4f53e2f3738c2befe04 not found: ID does not exist" containerID="f69c9c9af0fce944ad52312948d6f8d468de723a628af4f53e2f3738c2befe04" Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.882582 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f69c9c9af0fce944ad52312948d6f8d468de723a628af4f53e2f3738c2befe04"} err="failed to get container status \"f69c9c9af0fce944ad52312948d6f8d468de723a628af4f53e2f3738c2befe04\": rpc error: code = 
NotFound desc = could not find container \"f69c9c9af0fce944ad52312948d6f8d468de723a628af4f53e2f3738c2befe04\": container with ID starting with f69c9c9af0fce944ad52312948d6f8d468de723a628af4f53e2f3738c2befe04 not found: ID does not exist" Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.882620 4881 scope.go:117] "RemoveContainer" containerID="b82e9c218bc5b934a5b5fa841b40144dd1e278c77905770ed77664b38aad47c4" Dec 11 09:44:30 crc kubenswrapper[4881]: E1211 09:44:30.886299 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b82e9c218bc5b934a5b5fa841b40144dd1e278c77905770ed77664b38aad47c4\": container with ID starting with b82e9c218bc5b934a5b5fa841b40144dd1e278c77905770ed77664b38aad47c4 not found: ID does not exist" containerID="b82e9c218bc5b934a5b5fa841b40144dd1e278c77905770ed77664b38aad47c4" Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.886360 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b82e9c218bc5b934a5b5fa841b40144dd1e278c77905770ed77664b38aad47c4"} err="failed to get container status \"b82e9c218bc5b934a5b5fa841b40144dd1e278c77905770ed77664b38aad47c4\": rpc error: code = NotFound desc = could not find container \"b82e9c218bc5b934a5b5fa841b40144dd1e278c77905770ed77664b38aad47c4\": container with ID starting with b82e9c218bc5b934a5b5fa841b40144dd1e278c77905770ed77664b38aad47c4 not found: ID does not exist" Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.886386 4881 scope.go:117] "RemoveContainer" containerID="eb571444bcbdeb7b4c0f5b6d57feaacd8482414c69a4bb988d322d61a2808a6c" Dec 11 09:44:30 crc kubenswrapper[4881]: E1211 09:44:30.890468 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb571444bcbdeb7b4c0f5b6d57feaacd8482414c69a4bb988d322d61a2808a6c\": container with ID starting with eb571444bcbdeb7b4c0f5b6d57feaacd8482414c69a4bb988d322d61a2808a6c not found: ID does not exist" containerID="eb571444bcbdeb7b4c0f5b6d57feaacd8482414c69a4bb988d322d61a2808a6c" Dec 11 09:44:30 crc kubenswrapper[4881]: I1211 09:44:30.890493 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb571444bcbdeb7b4c0f5b6d57feaacd8482414c69a4bb988d322d61a2808a6c"} err="failed to get container status \"eb571444bcbdeb7b4c0f5b6d57feaacd8482414c69a4bb988d322d61a2808a6c\": rpc error: code = NotFound desc = could not find container \"eb571444bcbdeb7b4c0f5b6d57feaacd8482414c69a4bb988d322d61a2808a6c\": container with ID starting with eb571444bcbdeb7b4c0f5b6d57feaacd8482414c69a4bb988d322d61a2808a6c not found: ID does not exist" Dec 11 09:44:31 crc kubenswrapper[4881]: I1211 09:44:31.022939 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="131bdc29-71ea-417f-8d16-d89ff120fc23" path="/var/lib/kubelet/pods/131bdc29-71ea-417f-8d16-d89ff120fc23/volumes" Dec 11 09:44:31 crc kubenswrapper[4881]: E1211 09:44:31.028441 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod131bdc29_71ea_417f_8d16_d89ff120fc23.slice/crio-90e37f02af1d66143b4a8958481cd69f2f16729881091c66defe47f8f20bc6d7\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod131bdc29_71ea_417f_8d16_d89ff120fc23.slice\": RecentStats: unable to find data in memory cache]" Dec 11 09:44:43 crc 
kubenswrapper[4881]: I1211 09:44:43.016688 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29" Dec 11 09:44:43 crc kubenswrapper[4881]: E1211 09:44:43.017729 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.006662 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29" Dec 11 09:44:54 crc kubenswrapper[4881]: E1211 09:44:54.007494 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.169144 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-glwkp"] Dec 11 09:44:54 crc kubenswrapper[4881]: E1211 09:44:54.170225 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="131bdc29-71ea-417f-8d16-d89ff120fc23" containerName="registry-server" Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.170347 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="131bdc29-71ea-417f-8d16-d89ff120fc23" containerName="registry-server" Dec 11 09:44:54 crc kubenswrapper[4881]: E1211 09:44:54.170456 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="131bdc29-71ea-417f-8d16-d89ff120fc23" containerName="extract-utilities" Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.170589 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="131bdc29-71ea-417f-8d16-d89ff120fc23" containerName="extract-utilities" Dec 11 09:44:54 crc kubenswrapper[4881]: E1211 09:44:54.170682 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="131bdc29-71ea-417f-8d16-d89ff120fc23" containerName="extract-content" Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.170766 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="131bdc29-71ea-417f-8d16-d89ff120fc23" containerName="extract-content" Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.171408 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="131bdc29-71ea-417f-8d16-d89ff120fc23" containerName="registry-server" Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.174480 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-glwkp" Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.190060 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-glwkp"] Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.308803 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12534c03-891e-4adf-9f5b-0acdaa76297d-utilities\") pod \"redhat-marketplace-glwkp\" (UID: \"12534c03-891e-4adf-9f5b-0acdaa76297d\") " pod="openshift-marketplace/redhat-marketplace-glwkp" Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.308861 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhhnz\" (UniqueName: \"kubernetes.io/projected/12534c03-891e-4adf-9f5b-0acdaa76297d-kube-api-access-mhhnz\") pod \"redhat-marketplace-glwkp\" (UID: \"12534c03-891e-4adf-9f5b-0acdaa76297d\") " pod="openshift-marketplace/redhat-marketplace-glwkp" Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.309126 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12534c03-891e-4adf-9f5b-0acdaa76297d-catalog-content\") pod \"redhat-marketplace-glwkp\" (UID: \"12534c03-891e-4adf-9f5b-0acdaa76297d\") " pod="openshift-marketplace/redhat-marketplace-glwkp" Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.411577 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12534c03-891e-4adf-9f5b-0acdaa76297d-catalog-content\") pod \"redhat-marketplace-glwkp\" (UID: \"12534c03-891e-4adf-9f5b-0acdaa76297d\") " pod="openshift-marketplace/redhat-marketplace-glwkp" Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.412081 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12534c03-891e-4adf-9f5b-0acdaa76297d-utilities\") pod \"redhat-marketplace-glwkp\" (UID: \"12534c03-891e-4adf-9f5b-0acdaa76297d\") " pod="openshift-marketplace/redhat-marketplace-glwkp" Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.412243 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhhnz\" (UniqueName: \"kubernetes.io/projected/12534c03-891e-4adf-9f5b-0acdaa76297d-kube-api-access-mhhnz\") pod \"redhat-marketplace-glwkp\" (UID: \"12534c03-891e-4adf-9f5b-0acdaa76297d\") " pod="openshift-marketplace/redhat-marketplace-glwkp" Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.412290 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12534c03-891e-4adf-9f5b-0acdaa76297d-catalog-content\") pod \"redhat-marketplace-glwkp\" (UID: \"12534c03-891e-4adf-9f5b-0acdaa76297d\") " pod="openshift-marketplace/redhat-marketplace-glwkp" Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.412646 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12534c03-891e-4adf-9f5b-0acdaa76297d-utilities\") pod \"redhat-marketplace-glwkp\" (UID: \"12534c03-891e-4adf-9f5b-0acdaa76297d\") " pod="openshift-marketplace/redhat-marketplace-glwkp" Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.442015 4881 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-mhhnz\" (UniqueName: \"kubernetes.io/projected/12534c03-891e-4adf-9f5b-0acdaa76297d-kube-api-access-mhhnz\") pod \"redhat-marketplace-glwkp\" (UID: \"12534c03-891e-4adf-9f5b-0acdaa76297d\") " pod="openshift-marketplace/redhat-marketplace-glwkp" Dec 11 09:44:54 crc kubenswrapper[4881]: I1211 09:44:54.499574 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-glwkp" Dec 11 09:44:55 crc kubenswrapper[4881]: I1211 09:44:55.041123 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-glwkp"] Dec 11 09:44:55 crc kubenswrapper[4881]: W1211 09:44:55.042368 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12534c03_891e_4adf_9f5b_0acdaa76297d.slice/crio-c08b6b47c6fc0f56c07c0c5dd72ba595275bfce092e462df21e503bc69d3b74d WatchSource:0}: Error finding container c08b6b47c6fc0f56c07c0c5dd72ba595275bfce092e462df21e503bc69d3b74d: Status 404 returned error can't find the container with id c08b6b47c6fc0f56c07c0c5dd72ba595275bfce092e462df21e503bc69d3b74d Dec 11 09:44:56 crc kubenswrapper[4881]: I1211 09:44:56.002806 4881 generic.go:334] "Generic (PLEG): container finished" podID="12534c03-891e-4adf-9f5b-0acdaa76297d" containerID="43e25cb831a0b299e0345e397435806115ab1b4f4540c9792e98c64fc6dfa20c" exitCode=0 Dec 11 09:44:56 crc kubenswrapper[4881]: I1211 09:44:56.002898 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-glwkp" event={"ID":"12534c03-891e-4adf-9f5b-0acdaa76297d","Type":"ContainerDied","Data":"43e25cb831a0b299e0345e397435806115ab1b4f4540c9792e98c64fc6dfa20c"} Dec 11 09:44:56 crc kubenswrapper[4881]: I1211 09:44:56.003311 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-glwkp" event={"ID":"12534c03-891e-4adf-9f5b-0acdaa76297d","Type":"ContainerStarted","Data":"c08b6b47c6fc0f56c07c0c5dd72ba595275bfce092e462df21e503bc69d3b74d"} Dec 11 09:44:57 crc kubenswrapper[4881]: I1211 09:44:57.020965 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-glwkp" event={"ID":"12534c03-891e-4adf-9f5b-0acdaa76297d","Type":"ContainerStarted","Data":"03a7707c9792ab7cd54292f29989f35fcc865bd209d12ee7e352a44a2c62e865"} Dec 11 09:44:59 crc kubenswrapper[4881]: I1211 09:44:59.041100 4881 generic.go:334] "Generic (PLEG): container finished" podID="12534c03-891e-4adf-9f5b-0acdaa76297d" containerID="03a7707c9792ab7cd54292f29989f35fcc865bd209d12ee7e352a44a2c62e865" exitCode=0 Dec 11 09:44:59 crc kubenswrapper[4881]: I1211 09:44:59.041176 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-glwkp" event={"ID":"12534c03-891e-4adf-9f5b-0acdaa76297d","Type":"ContainerDied","Data":"03a7707c9792ab7cd54292f29989f35fcc865bd209d12ee7e352a44a2c62e865"} Dec 11 09:45:00 crc kubenswrapper[4881]: I1211 09:45:00.197967 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf"] Dec 11 09:45:00 crc kubenswrapper[4881]: I1211 09:45:00.200529 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" Dec 11 09:45:00 crc kubenswrapper[4881]: I1211 09:45:00.206513 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 11 09:45:00 crc kubenswrapper[4881]: I1211 09:45:00.206979 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 11 09:45:00 crc kubenswrapper[4881]: I1211 09:45:00.266617 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4h5bd\" (UniqueName: \"kubernetes.io/projected/564c2778-0185-49a5-8738-82dc14524c6e-kube-api-access-4h5bd\") pod \"collect-profiles-29424105-gthlf\" (UID: \"564c2778-0185-49a5-8738-82dc14524c6e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" Dec 11 09:45:00 crc kubenswrapper[4881]: I1211 09:45:00.266741 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/564c2778-0185-49a5-8738-82dc14524c6e-config-volume\") pod \"collect-profiles-29424105-gthlf\" (UID: \"564c2778-0185-49a5-8738-82dc14524c6e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" Dec 11 09:45:00 crc kubenswrapper[4881]: I1211 09:45:00.266862 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/564c2778-0185-49a5-8738-82dc14524c6e-secret-volume\") pod \"collect-profiles-29424105-gthlf\" (UID: \"564c2778-0185-49a5-8738-82dc14524c6e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" Dec 11 09:45:00 crc kubenswrapper[4881]: I1211 09:45:00.272323 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf"] Dec 11 09:45:00 crc kubenswrapper[4881]: I1211 09:45:00.371182 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4h5bd\" (UniqueName: \"kubernetes.io/projected/564c2778-0185-49a5-8738-82dc14524c6e-kube-api-access-4h5bd\") pod \"collect-profiles-29424105-gthlf\" (UID: \"564c2778-0185-49a5-8738-82dc14524c6e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" Dec 11 09:45:00 crc kubenswrapper[4881]: I1211 09:45:00.371615 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/564c2778-0185-49a5-8738-82dc14524c6e-config-volume\") pod \"collect-profiles-29424105-gthlf\" (UID: \"564c2778-0185-49a5-8738-82dc14524c6e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" Dec 11 09:45:00 crc kubenswrapper[4881]: I1211 09:45:00.371765 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/564c2778-0185-49a5-8738-82dc14524c6e-secret-volume\") pod \"collect-profiles-29424105-gthlf\" (UID: \"564c2778-0185-49a5-8738-82dc14524c6e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" Dec 11 09:45:00 crc kubenswrapper[4881]: I1211 09:45:00.373895 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/564c2778-0185-49a5-8738-82dc14524c6e-config-volume\") pod 
\"collect-profiles-29424105-gthlf\" (UID: \"564c2778-0185-49a5-8738-82dc14524c6e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" Dec 11 09:45:00 crc kubenswrapper[4881]: I1211 09:45:00.378576 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/564c2778-0185-49a5-8738-82dc14524c6e-secret-volume\") pod \"collect-profiles-29424105-gthlf\" (UID: \"564c2778-0185-49a5-8738-82dc14524c6e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" Dec 11 09:45:00 crc kubenswrapper[4881]: I1211 09:45:00.388456 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4h5bd\" (UniqueName: \"kubernetes.io/projected/564c2778-0185-49a5-8738-82dc14524c6e-kube-api-access-4h5bd\") pod \"collect-profiles-29424105-gthlf\" (UID: \"564c2778-0185-49a5-8738-82dc14524c6e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" Dec 11 09:45:00 crc kubenswrapper[4881]: I1211 09:45:00.541279 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" Dec 11 09:45:01 crc kubenswrapper[4881]: I1211 09:45:01.256983 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-glwkp" event={"ID":"12534c03-891e-4adf-9f5b-0acdaa76297d","Type":"ContainerStarted","Data":"8a47a138f8c5c9c0b010bd585dc4d27f9a3735e2a8c39d8fa7d7be5755be6e7a"} Dec 11 09:45:01 crc kubenswrapper[4881]: I1211 09:45:01.300478 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-glwkp" podStartSLOduration=3.55005179 podStartE2EDuration="7.300461935s" podCreationTimestamp="2025-12-11 09:44:54 +0000 UTC" firstStartedPulling="2025-12-11 09:44:56.005657802 +0000 UTC m=+5344.383026499" lastFinishedPulling="2025-12-11 09:44:59.756067947 +0000 UTC m=+5348.133436644" observedRunningTime="2025-12-11 09:45:01.293670309 +0000 UTC m=+5349.671039016" watchObservedRunningTime="2025-12-11 09:45:01.300461935 +0000 UTC m=+5349.677830632" Dec 11 09:45:01 crc kubenswrapper[4881]: I1211 09:45:01.329923 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf"] Dec 11 09:45:02 crc kubenswrapper[4881]: I1211 09:45:02.273104 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" event={"ID":"564c2778-0185-49a5-8738-82dc14524c6e","Type":"ContainerStarted","Data":"b4d81e82237abf66598b4892a4dd6a9aef4d23ca9ea5c08e0ea6af66129178df"} Dec 11 09:45:02 crc kubenswrapper[4881]: I1211 09:45:02.273704 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" event={"ID":"564c2778-0185-49a5-8738-82dc14524c6e","Type":"ContainerStarted","Data":"72ef9454b88bf56d7f67070028ff7e294684d9dc6d9e7190bcc4cc2a522f1a3f"} Dec 11 09:45:02 crc kubenswrapper[4881]: I1211 09:45:02.301557 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" podStartSLOduration=2.301533023 podStartE2EDuration="2.301533023s" podCreationTimestamp="2025-12-11 09:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 09:45:02.297623407 +0000 UTC 
m=+5350.674992104" watchObservedRunningTime="2025-12-11 09:45:02.301533023 +0000 UTC m=+5350.678901720" Dec 11 09:45:03 crc kubenswrapper[4881]: I1211 09:45:03.300673 4881 generic.go:334] "Generic (PLEG): container finished" podID="564c2778-0185-49a5-8738-82dc14524c6e" containerID="b4d81e82237abf66598b4892a4dd6a9aef4d23ca9ea5c08e0ea6af66129178df" exitCode=0 Dec 11 09:45:03 crc kubenswrapper[4881]: I1211 09:45:03.300948 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" event={"ID":"564c2778-0185-49a5-8738-82dc14524c6e","Type":"ContainerDied","Data":"b4d81e82237abf66598b4892a4dd6a9aef4d23ca9ea5c08e0ea6af66129178df"} Dec 11 09:45:04 crc kubenswrapper[4881]: I1211 09:45:04.500395 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-glwkp" Dec 11 09:45:04 crc kubenswrapper[4881]: I1211 09:45:04.500817 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-glwkp" Dec 11 09:45:04 crc kubenswrapper[4881]: I1211 09:45:04.575172 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-glwkp" Dec 11 09:45:04 crc kubenswrapper[4881]: I1211 09:45:04.853248 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" Dec 11 09:45:04 crc kubenswrapper[4881]: I1211 09:45:04.962385 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/564c2778-0185-49a5-8738-82dc14524c6e-secret-volume\") pod \"564c2778-0185-49a5-8738-82dc14524c6e\" (UID: \"564c2778-0185-49a5-8738-82dc14524c6e\") " Dec 11 09:45:04 crc kubenswrapper[4881]: I1211 09:45:04.962454 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/564c2778-0185-49a5-8738-82dc14524c6e-config-volume\") pod \"564c2778-0185-49a5-8738-82dc14524c6e\" (UID: \"564c2778-0185-49a5-8738-82dc14524c6e\") " Dec 11 09:45:04 crc kubenswrapper[4881]: I1211 09:45:04.962582 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4h5bd\" (UniqueName: \"kubernetes.io/projected/564c2778-0185-49a5-8738-82dc14524c6e-kube-api-access-4h5bd\") pod \"564c2778-0185-49a5-8738-82dc14524c6e\" (UID: \"564c2778-0185-49a5-8738-82dc14524c6e\") " Dec 11 09:45:04 crc kubenswrapper[4881]: I1211 09:45:04.963323 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/564c2778-0185-49a5-8738-82dc14524c6e-config-volume" (OuterVolumeSpecName: "config-volume") pod "564c2778-0185-49a5-8738-82dc14524c6e" (UID: "564c2778-0185-49a5-8738-82dc14524c6e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 09:45:04 crc kubenswrapper[4881]: I1211 09:45:04.970409 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/564c2778-0185-49a5-8738-82dc14524c6e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "564c2778-0185-49a5-8738-82dc14524c6e" (UID: "564c2778-0185-49a5-8738-82dc14524c6e"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:45:04 crc kubenswrapper[4881]: I1211 09:45:04.970942 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/564c2778-0185-49a5-8738-82dc14524c6e-kube-api-access-4h5bd" (OuterVolumeSpecName: "kube-api-access-4h5bd") pod "564c2778-0185-49a5-8738-82dc14524c6e" (UID: "564c2778-0185-49a5-8738-82dc14524c6e"). InnerVolumeSpecName "kube-api-access-4h5bd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:45:05 crc kubenswrapper[4881]: I1211 09:45:05.066264 4881 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/564c2778-0185-49a5-8738-82dc14524c6e-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 11 09:45:05 crc kubenswrapper[4881]: I1211 09:45:05.066297 4881 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/564c2778-0185-49a5-8738-82dc14524c6e-config-volume\") on node \"crc\" DevicePath \"\"" Dec 11 09:45:05 crc kubenswrapper[4881]: I1211 09:45:05.066309 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4h5bd\" (UniqueName: \"kubernetes.io/projected/564c2778-0185-49a5-8738-82dc14524c6e-kube-api-access-4h5bd\") on node \"crc\" DevicePath \"\"" Dec 11 09:45:05 crc kubenswrapper[4881]: I1211 09:45:05.330017 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" event={"ID":"564c2778-0185-49a5-8738-82dc14524c6e","Type":"ContainerDied","Data":"72ef9454b88bf56d7f67070028ff7e294684d9dc6d9e7190bcc4cc2a522f1a3f"} Dec 11 09:45:05 crc kubenswrapper[4881]: I1211 09:45:05.330073 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="72ef9454b88bf56d7f67070028ff7e294684d9dc6d9e7190bcc4cc2a522f1a3f" Dec 11 09:45:05 crc kubenswrapper[4881]: I1211 09:45:05.330033 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424105-gthlf" Dec 11 09:45:05 crc kubenswrapper[4881]: I1211 09:45:05.410705 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-glwkp" Dec 11 09:45:05 crc kubenswrapper[4881]: I1211 09:45:05.412318 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj"] Dec 11 09:45:05 crc kubenswrapper[4881]: I1211 09:45:05.428241 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424060-lzkkj"] Dec 11 09:45:05 crc kubenswrapper[4881]: I1211 09:45:05.467951 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-glwkp"] Dec 11 09:45:07 crc kubenswrapper[4881]: I1211 09:45:07.006263 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29" Dec 11 09:45:07 crc kubenswrapper[4881]: E1211 09:45:07.007072 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:45:07 crc kubenswrapper[4881]: I1211 09:45:07.019869 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9eba9e53-1f89-432d-8099-28ec7d7ce331" path="/var/lib/kubelet/pods/9eba9e53-1f89-432d-8099-28ec7d7ce331/volumes" Dec 11 09:45:07 crc kubenswrapper[4881]: I1211 09:45:07.350767 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-glwkp" podUID="12534c03-891e-4adf-9f5b-0acdaa76297d" containerName="registry-server" containerID="cri-o://8a47a138f8c5c9c0b010bd585dc4d27f9a3735e2a8c39d8fa7d7be5755be6e7a" gracePeriod=2 Dec 11 09:45:08 crc kubenswrapper[4881]: I1211 09:45:08.363270 4881 generic.go:334] "Generic (PLEG): container finished" podID="12534c03-891e-4adf-9f5b-0acdaa76297d" containerID="8a47a138f8c5c9c0b010bd585dc4d27f9a3735e2a8c39d8fa7d7be5755be6e7a" exitCode=0 Dec 11 09:45:08 crc kubenswrapper[4881]: I1211 09:45:08.363633 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-glwkp" event={"ID":"12534c03-891e-4adf-9f5b-0acdaa76297d","Type":"ContainerDied","Data":"8a47a138f8c5c9c0b010bd585dc4d27f9a3735e2a8c39d8fa7d7be5755be6e7a"} Dec 11 09:45:08 crc kubenswrapper[4881]: I1211 09:45:08.506793 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-glwkp" Dec 11 09:45:08 crc kubenswrapper[4881]: I1211 09:45:08.556091 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12534c03-891e-4adf-9f5b-0acdaa76297d-utilities\") pod \"12534c03-891e-4adf-9f5b-0acdaa76297d\" (UID: \"12534c03-891e-4adf-9f5b-0acdaa76297d\") " Dec 11 09:45:08 crc kubenswrapper[4881]: I1211 09:45:08.556141 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12534c03-891e-4adf-9f5b-0acdaa76297d-catalog-content\") pod \"12534c03-891e-4adf-9f5b-0acdaa76297d\" (UID: \"12534c03-891e-4adf-9f5b-0acdaa76297d\") " Dec 11 09:45:08 crc kubenswrapper[4881]: I1211 09:45:08.556185 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhhnz\" (UniqueName: \"kubernetes.io/projected/12534c03-891e-4adf-9f5b-0acdaa76297d-kube-api-access-mhhnz\") pod \"12534c03-891e-4adf-9f5b-0acdaa76297d\" (UID: \"12534c03-891e-4adf-9f5b-0acdaa76297d\") " Dec 11 09:45:08 crc kubenswrapper[4881]: I1211 09:45:08.556845 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12534c03-891e-4adf-9f5b-0acdaa76297d-utilities" (OuterVolumeSpecName: "utilities") pod "12534c03-891e-4adf-9f5b-0acdaa76297d" (UID: "12534c03-891e-4adf-9f5b-0acdaa76297d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:45:08 crc kubenswrapper[4881]: I1211 09:45:08.557162 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12534c03-891e-4adf-9f5b-0acdaa76297d-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:45:08 crc kubenswrapper[4881]: I1211 09:45:08.569708 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12534c03-891e-4adf-9f5b-0acdaa76297d-kube-api-access-mhhnz" (OuterVolumeSpecName: "kube-api-access-mhhnz") pod "12534c03-891e-4adf-9f5b-0acdaa76297d" (UID: "12534c03-891e-4adf-9f5b-0acdaa76297d"). InnerVolumeSpecName "kube-api-access-mhhnz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:45:08 crc kubenswrapper[4881]: I1211 09:45:08.583848 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12534c03-891e-4adf-9f5b-0acdaa76297d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "12534c03-891e-4adf-9f5b-0acdaa76297d" (UID: "12534c03-891e-4adf-9f5b-0acdaa76297d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:45:08 crc kubenswrapper[4881]: I1211 09:45:08.660395 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12534c03-891e-4adf-9f5b-0acdaa76297d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:45:08 crc kubenswrapper[4881]: I1211 09:45:08.660444 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhhnz\" (UniqueName: \"kubernetes.io/projected/12534c03-891e-4adf-9f5b-0acdaa76297d-kube-api-access-mhhnz\") on node \"crc\" DevicePath \"\"" Dec 11 09:45:09 crc kubenswrapper[4881]: I1211 09:45:09.389850 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-glwkp" event={"ID":"12534c03-891e-4adf-9f5b-0acdaa76297d","Type":"ContainerDied","Data":"c08b6b47c6fc0f56c07c0c5dd72ba595275bfce092e462df21e503bc69d3b74d"} Dec 11 09:45:09 crc kubenswrapper[4881]: I1211 09:45:09.390227 4881 scope.go:117] "RemoveContainer" containerID="8a47a138f8c5c9c0b010bd585dc4d27f9a3735e2a8c39d8fa7d7be5755be6e7a" Dec 11 09:45:09 crc kubenswrapper[4881]: I1211 09:45:09.390520 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-glwkp" Dec 11 09:45:09 crc kubenswrapper[4881]: I1211 09:45:09.426408 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-glwkp"] Dec 11 09:45:09 crc kubenswrapper[4881]: I1211 09:45:09.438730 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-glwkp"] Dec 11 09:45:09 crc kubenswrapper[4881]: I1211 09:45:09.446809 4881 scope.go:117] "RemoveContainer" containerID="03a7707c9792ab7cd54292f29989f35fcc865bd209d12ee7e352a44a2c62e865" Dec 11 09:45:09 crc kubenswrapper[4881]: I1211 09:45:09.652902 4881 scope.go:117] "RemoveContainer" containerID="43e25cb831a0b299e0345e397435806115ab1b4f4540c9792e98c64fc6dfa20c" Dec 11 09:45:11 crc kubenswrapper[4881]: I1211 09:45:11.020061 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12534c03-891e-4adf-9f5b-0acdaa76297d" path="/var/lib/kubelet/pods/12534c03-891e-4adf-9f5b-0acdaa76297d/volumes" Dec 11 09:45:12 crc kubenswrapper[4881]: I1211 09:45:12.886006 4881 scope.go:117] "RemoveContainer" containerID="9688ead5aa86528b8dc1a04c530da212bcd09429c2f4eaec759ef0bcbe3f78e7" Dec 11 09:45:21 crc kubenswrapper[4881]: I1211 09:45:21.006525 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29" Dec 11 09:45:21 crc kubenswrapper[4881]: E1211 09:45:21.007452 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:45:33 crc kubenswrapper[4881]: I1211 09:45:33.017138 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29" Dec 11 09:45:33 crc kubenswrapper[4881]: E1211 09:45:33.018511 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.686270 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qnffx"] Dec 11 09:45:35 crc kubenswrapper[4881]: E1211 09:45:35.687355 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12534c03-891e-4adf-9f5b-0acdaa76297d" containerName="registry-server" Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.687372 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="12534c03-891e-4adf-9f5b-0acdaa76297d" containerName="registry-server" Dec 11 09:45:35 crc kubenswrapper[4881]: E1211 09:45:35.687389 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12534c03-891e-4adf-9f5b-0acdaa76297d" containerName="extract-utilities" Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.687395 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="12534c03-891e-4adf-9f5b-0acdaa76297d" containerName="extract-utilities" Dec 11 09:45:35 crc kubenswrapper[4881]: E1211 09:45:35.687412 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12534c03-891e-4adf-9f5b-0acdaa76297d" containerName="extract-content" Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.687418 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="12534c03-891e-4adf-9f5b-0acdaa76297d" containerName="extract-content" Dec 11 09:45:35 crc kubenswrapper[4881]: E1211 09:45:35.687462 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="564c2778-0185-49a5-8738-82dc14524c6e" containerName="collect-profiles" Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.687468 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="564c2778-0185-49a5-8738-82dc14524c6e" containerName="collect-profiles" Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.687698 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="12534c03-891e-4adf-9f5b-0acdaa76297d" containerName="registry-server" Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.687742 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="564c2778-0185-49a5-8738-82dc14524c6e" containerName="collect-profiles" Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.689956 4881 util.go:30] "No sandbox for pod can be found. 
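Note: the recurring "back-off 5m0s restarting failed container" errors above show the kubelet's restart back-off already at its cap: each failed restart roughly doubles the wait until it saturates. A minimal sketch of that policy; the 5m cap is taken from the messages above, while the 10s base is an assumption about kubelet defaults, not something this log states:

package main

import (
	"fmt"
	"time"
)

// Doubling restart back-off capped at 5m, the cap visible in the
// "back-off 5m0s" messages above. The 10s base is assumed.
func main() {
	const (
		base = 10 * time.Second
		max  = 5 * time.Minute
	)
	delay := base
	for i := 1; i <= 7; i++ {
		fmt.Printf("restart %d: wait %v\n", i, delay)
		delay *= 2
		if delay > max {
			delay = max
		}
	}
}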
Need to start a new one" pod="openshift-marketplace/certified-operators-qnffx" Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.699502 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qnffx"] Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.856696 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18611892-d199-4d6c-a3b6-391c8c78511c-catalog-content\") pod \"certified-operators-qnffx\" (UID: \"18611892-d199-4d6c-a3b6-391c8c78511c\") " pod="openshift-marketplace/certified-operators-qnffx" Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.856826 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18611892-d199-4d6c-a3b6-391c8c78511c-utilities\") pod \"certified-operators-qnffx\" (UID: \"18611892-d199-4d6c-a3b6-391c8c78511c\") " pod="openshift-marketplace/certified-operators-qnffx" Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.856985 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwv6k\" (UniqueName: \"kubernetes.io/projected/18611892-d199-4d6c-a3b6-391c8c78511c-kube-api-access-lwv6k\") pod \"certified-operators-qnffx\" (UID: \"18611892-d199-4d6c-a3b6-391c8c78511c\") " pod="openshift-marketplace/certified-operators-qnffx" Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.958813 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwv6k\" (UniqueName: \"kubernetes.io/projected/18611892-d199-4d6c-a3b6-391c8c78511c-kube-api-access-lwv6k\") pod \"certified-operators-qnffx\" (UID: \"18611892-d199-4d6c-a3b6-391c8c78511c\") " pod="openshift-marketplace/certified-operators-qnffx" Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.958992 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18611892-d199-4d6c-a3b6-391c8c78511c-catalog-content\") pod \"certified-operators-qnffx\" (UID: \"18611892-d199-4d6c-a3b6-391c8c78511c\") " pod="openshift-marketplace/certified-operators-qnffx" Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.959075 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18611892-d199-4d6c-a3b6-391c8c78511c-utilities\") pod \"certified-operators-qnffx\" (UID: \"18611892-d199-4d6c-a3b6-391c8c78511c\") " pod="openshift-marketplace/certified-operators-qnffx" Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.959637 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18611892-d199-4d6c-a3b6-391c8c78511c-utilities\") pod \"certified-operators-qnffx\" (UID: \"18611892-d199-4d6c-a3b6-391c8c78511c\") " pod="openshift-marketplace/certified-operators-qnffx" Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.960170 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18611892-d199-4d6c-a3b6-391c8c78511c-catalog-content\") pod \"certified-operators-qnffx\" (UID: \"18611892-d199-4d6c-a3b6-391c8c78511c\") " pod="openshift-marketplace/certified-operators-qnffx" Dec 11 09:45:35 crc kubenswrapper[4881]: I1211 09:45:35.987489 4881 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-lwv6k\" (UniqueName: \"kubernetes.io/projected/18611892-d199-4d6c-a3b6-391c8c78511c-kube-api-access-lwv6k\") pod \"certified-operators-qnffx\" (UID: \"18611892-d199-4d6c-a3b6-391c8c78511c\") " pod="openshift-marketplace/certified-operators-qnffx" Dec 11 09:45:36 crc kubenswrapper[4881]: I1211 09:45:36.070077 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qnffx" Dec 11 09:45:36 crc kubenswrapper[4881]: I1211 09:45:36.589523 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qnffx"] Dec 11 09:45:36 crc kubenswrapper[4881]: I1211 09:45:36.693856 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnffx" event={"ID":"18611892-d199-4d6c-a3b6-391c8c78511c","Type":"ContainerStarted","Data":"4e32154deaa9969ad013f87943862b0e043cec2d2253c929b574a99fc05417e2"} Dec 11 09:45:37 crc kubenswrapper[4881]: I1211 09:45:37.706110 4881 generic.go:334] "Generic (PLEG): container finished" podID="18611892-d199-4d6c-a3b6-391c8c78511c" containerID="3f3bad3e4c68da49042cc569b5837d7a86ec6647a796869ab3b5a4796a6b42ab" exitCode=0 Dec 11 09:45:37 crc kubenswrapper[4881]: I1211 09:45:37.706161 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnffx" event={"ID":"18611892-d199-4d6c-a3b6-391c8c78511c","Type":"ContainerDied","Data":"3f3bad3e4c68da49042cc569b5837d7a86ec6647a796869ab3b5a4796a6b42ab"} Dec 11 09:45:47 crc kubenswrapper[4881]: I1211 09:45:47.826525 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnffx" event={"ID":"18611892-d199-4d6c-a3b6-391c8c78511c","Type":"ContainerStarted","Data":"856ac1a98e884829e1e4342f1291bdf1644fa7f56aea003a15f4b3759e226818"} Dec 11 09:45:48 crc kubenswrapper[4881]: I1211 09:45:48.006632 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29" Dec 11 09:45:48 crc kubenswrapper[4881]: E1211 09:45:48.006973 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:45:48 crc kubenswrapper[4881]: E1211 09:45:48.405107 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod18611892_d199_4d6c_a3b6_391c8c78511c.slice/crio-conmon-856ac1a98e884829e1e4342f1291bdf1644fa7f56aea003a15f4b3759e226818.scope\": RecentStats: unable to find data in memory cache]" Dec 11 09:45:48 crc kubenswrapper[4881]: E1211 09:45:48.405134 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod18611892_d199_4d6c_a3b6_391c8c78511c.slice/crio-conmon-856ac1a98e884829e1e4342f1291bdf1644fa7f56aea003a15f4b3759e226818.scope\": RecentStats: unable to find data in memory cache]" Dec 11 09:45:48 crc kubenswrapper[4881]: I1211 09:45:48.840689 4881 generic.go:334] "Generic (PLEG): container finished" 
podID="18611892-d199-4d6c-a3b6-391c8c78511c" containerID="856ac1a98e884829e1e4342f1291bdf1644fa7f56aea003a15f4b3759e226818" exitCode=0 Dec 11 09:45:48 crc kubenswrapper[4881]: I1211 09:45:48.840774 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnffx" event={"ID":"18611892-d199-4d6c-a3b6-391c8c78511c","Type":"ContainerDied","Data":"856ac1a98e884829e1e4342f1291bdf1644fa7f56aea003a15f4b3759e226818"} Dec 11 09:45:49 crc kubenswrapper[4881]: I1211 09:45:49.854800 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnffx" event={"ID":"18611892-d199-4d6c-a3b6-391c8c78511c","Type":"ContainerStarted","Data":"27dbf9ee9d832288c0d25cfa1054884dbfa1cae929ee7bcdc2323aea0ea009e4"} Dec 11 09:45:49 crc kubenswrapper[4881]: I1211 09:45:49.884869 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qnffx" podStartSLOduration=3.058463628 podStartE2EDuration="14.88484895s" podCreationTimestamp="2025-12-11 09:45:35 +0000 UTC" firstStartedPulling="2025-12-11 09:45:37.709564876 +0000 UTC m=+5386.086933573" lastFinishedPulling="2025-12-11 09:45:49.535950198 +0000 UTC m=+5397.913318895" observedRunningTime="2025-12-11 09:45:49.876324441 +0000 UTC m=+5398.253693138" watchObservedRunningTime="2025-12-11 09:45:49.88484895 +0000 UTC m=+5398.262217647" Dec 11 09:45:56 crc kubenswrapper[4881]: I1211 09:45:56.070749 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qnffx" Dec 11 09:45:56 crc kubenswrapper[4881]: I1211 09:45:56.071283 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qnffx" Dec 11 09:45:56 crc kubenswrapper[4881]: I1211 09:45:56.124930 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qnffx" Dec 11 09:45:56 crc kubenswrapper[4881]: I1211 09:45:56.985652 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qnffx" Dec 11 09:45:57 crc kubenswrapper[4881]: I1211 09:45:57.152450 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qnffx"] Dec 11 09:45:57 crc kubenswrapper[4881]: I1211 09:45:57.244515 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zqwb5"] Dec 11 09:45:57 crc kubenswrapper[4881]: I1211 09:45:57.247156 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zqwb5" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" containerName="registry-server" containerID="cri-o://6c2c15c53f770597eb023fce51b25b81ecd613594b349f50f6f73fbdce968f02" gracePeriod=2 Dec 11 09:45:57 crc kubenswrapper[4881]: I1211 09:45:57.937190 4881 generic.go:334] "Generic (PLEG): container finished" podID="cf59f823-b688-420e-9e5b-20f4441c9635" containerID="6c2c15c53f770597eb023fce51b25b81ecd613594b349f50f6f73fbdce968f02" exitCode=0 Dec 11 09:45:57 crc kubenswrapper[4881]: I1211 09:45:57.937597 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zqwb5" event={"ID":"cf59f823-b688-420e-9e5b-20f4441c9635","Type":"ContainerDied","Data":"6c2c15c53f770597eb023fce51b25b81ecd613594b349f50f6f73fbdce968f02"} Dec 11 09:45:58 crc kubenswrapper[4881]: I1211 09:45:58.594806 4881 util.go:48] "No 
Dec 11 09:45:58 crc kubenswrapper[4881]: I1211 09:45:58.744966 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4pkf\" (UniqueName: \"kubernetes.io/projected/cf59f823-b688-420e-9e5b-20f4441c9635-kube-api-access-w4pkf\") pod \"cf59f823-b688-420e-9e5b-20f4441c9635\" (UID: \"cf59f823-b688-420e-9e5b-20f4441c9635\") "
Dec 11 09:45:58 crc kubenswrapper[4881]: I1211 09:45:58.745303 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf59f823-b688-420e-9e5b-20f4441c9635-utilities\") pod \"cf59f823-b688-420e-9e5b-20f4441c9635\" (UID: \"cf59f823-b688-420e-9e5b-20f4441c9635\") "
Dec 11 09:45:58 crc kubenswrapper[4881]: I1211 09:45:58.745576 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf59f823-b688-420e-9e5b-20f4441c9635-catalog-content\") pod \"cf59f823-b688-420e-9e5b-20f4441c9635\" (UID: \"cf59f823-b688-420e-9e5b-20f4441c9635\") "
Dec 11 09:45:58 crc kubenswrapper[4881]: I1211 09:45:58.747016 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf59f823-b688-420e-9e5b-20f4441c9635-utilities" (OuterVolumeSpecName: "utilities") pod "cf59f823-b688-420e-9e5b-20f4441c9635" (UID: "cf59f823-b688-420e-9e5b-20f4441c9635"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 09:45:58 crc kubenswrapper[4881]: I1211 09:45:58.828374 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf59f823-b688-420e-9e5b-20f4441c9635-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cf59f823-b688-420e-9e5b-20f4441c9635" (UID: "cf59f823-b688-420e-9e5b-20f4441c9635"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 09:45:58 crc kubenswrapper[4881]: I1211 09:45:58.849191 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf59f823-b688-420e-9e5b-20f4441c9635-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 09:45:58 crc kubenswrapper[4881]: I1211 09:45:58.849251 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf59f823-b688-420e-9e5b-20f4441c9635-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 09:45:58 crc kubenswrapper[4881]: I1211 09:45:58.949882 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zqwb5" event={"ID":"cf59f823-b688-420e-9e5b-20f4441c9635","Type":"ContainerDied","Data":"f369d688603915cfe07a9c76948634ab1ca7aae50ac50587fd5acc10f5ab3da4"}
Dec 11 09:45:58 crc kubenswrapper[4881]: I1211 09:45:58.949953 4881 scope.go:117] "RemoveContainer" containerID="6c2c15c53f770597eb023fce51b25b81ecd613594b349f50f6f73fbdce968f02"
Dec 11 09:45:58 crc kubenswrapper[4881]: I1211 09:45:58.949977 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zqwb5"
Dec 11 09:45:58 crc kubenswrapper[4881]: I1211 09:45:58.981351 4881 scope.go:117] "RemoveContainer" containerID="1df2724fa0b6a6da73cbc0003b0e611442485a47a448a17b0619b54bc573c5b9"
Dec 11 09:45:59 crc kubenswrapper[4881]: I1211 09:45:59.193480 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf59f823-b688-420e-9e5b-20f4441c9635-kube-api-access-w4pkf" (OuterVolumeSpecName: "kube-api-access-w4pkf") pod "cf59f823-b688-420e-9e5b-20f4441c9635" (UID: "cf59f823-b688-420e-9e5b-20f4441c9635"). InnerVolumeSpecName "kube-api-access-w4pkf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 09:45:59 crc kubenswrapper[4881]: I1211 09:45:59.207935 4881 scope.go:117] "RemoveContainer" containerID="10e8ddc36165caabb22f5f175b1bd4a8b0f3295773054c008d4965a4f6a1ae01"
Dec 11 09:45:59 crc kubenswrapper[4881]: I1211 09:45:59.258784 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4pkf\" (UniqueName: \"kubernetes.io/projected/cf59f823-b688-420e-9e5b-20f4441c9635-kube-api-access-w4pkf\") on node \"crc\" DevicePath \"\""
Dec 11 09:45:59 crc kubenswrapper[4881]: I1211 09:45:59.384098 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zqwb5"]
Dec 11 09:45:59 crc kubenswrapper[4881]: I1211 09:45:59.400750 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zqwb5"]
Dec 11 09:46:01 crc kubenswrapper[4881]: I1211 09:46:01.005572 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29"
Dec 11 09:46:01 crc kubenswrapper[4881]: E1211 09:46:01.006151 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:46:01 crc kubenswrapper[4881]: I1211 09:46:01.020621 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" path="/var/lib/kubelet/pods/cf59f823-b688-420e-9e5b-20f4441c9635/volumes"
Dec 11 09:46:16 crc kubenswrapper[4881]: I1211 09:46:16.006288 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29"
Dec 11 09:46:16 crc kubenswrapper[4881]: E1211 09:46:16.007079 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:46:30 crc kubenswrapper[4881]: I1211 09:46:30.006311 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29"
Dec 11 09:46:30 crc kubenswrapper[4881]: E1211 09:46:30.007301 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:46:45 crc kubenswrapper[4881]: I1211 09:46:45.010193 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29"
Dec 11 09:46:45 crc kubenswrapper[4881]: E1211 09:46:45.010946 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:46:58 crc kubenswrapper[4881]: I1211 09:46:58.006197 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29"
Dec 11 09:46:58 crc kubenswrapper[4881]: E1211 09:46:58.007032 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:47:12 crc kubenswrapper[4881]: I1211 09:47:12.005640 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29"
Dec 11 09:47:12 crc kubenswrapper[4881]: E1211 09:47:12.006399 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:47:25 crc kubenswrapper[4881]: I1211 09:47:25.006783 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29"
Dec 11 09:47:25 crc kubenswrapper[4881]: E1211 09:47:25.007535 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:47:39 crc kubenswrapper[4881]: I1211 09:47:39.006237 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29"
Dec 11 09:47:40 crc kubenswrapper[4881]: I1211 09:47:40.252325 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"0ae6707fd3cdd808adb6344e3f25bb421b3ca9971b8b6b09793435fa04ad9161"}
Dec 11 09:49:59 crc kubenswrapper[4881]: I1211 09:49:59.396956 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
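Note: the "Cleaned up orphaned pod volumes dir" lines above are the kubelet sweeping /var/lib/kubelet/pods for per-UID directories belonging to pods it no longer manages. A read-only Go sketch of that scan under the same layout assumption; the active-UID set here is hypothetical, and it only prints candidates instead of deleting them as the kubelet does:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// List per-pod directories under the kubelet root and flag any whose
// UID is not in the set of pods still being managed. Read-only.
func main() {
	active := map[string]bool{ // hypothetical set of still-active pod UIDs
		"564c2778-0185-49a5-8738-82dc14524c6e": true,
	}
	root := "/var/lib/kubelet/pods"
	entries, err := os.ReadDir(root)
	if err != nil {
		fmt.Println("skipping:", err) // e.g. not running on a node
		return
	}
	for _, e := range entries {
		if e.IsDir() && !active[e.Name()] {
			fmt.Println("orphaned pod volumes dir:", filepath.Join(root, e.Name(), "volumes"))
		}
	}
}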
Dec 11 09:49:59 crc kubenswrapper[4881]: I1211 09:49:59.397544 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 09:50:29 crc kubenswrapper[4881]: I1211 09:50:29.397859 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 09:50:29 crc kubenswrapper[4881]: I1211 09:50:29.398451 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 09:50:59 crc kubenswrapper[4881]: I1211 09:50:59.397133 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 09:50:59 crc kubenswrapper[4881]: I1211 09:50:59.397685 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 09:50:59 crc kubenswrapper[4881]: I1211 09:50:59.397730 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh"
Dec 11 09:50:59 crc kubenswrapper[4881]: I1211 09:50:59.398684 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0ae6707fd3cdd808adb6344e3f25bb421b3ca9971b8b6b09793435fa04ad9161"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 11 09:50:59 crc kubenswrapper[4881]: I1211 09:50:59.398745 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://0ae6707fd3cdd808adb6344e3f25bb421b3ca9971b8b6b09793435fa04ad9161" gracePeriod=600
Dec 11 09:50:59 crc kubenswrapper[4881]: I1211 09:50:59.733267 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="0ae6707fd3cdd808adb6344e3f25bb421b3ca9971b8b6b09793435fa04ad9161" exitCode=0
Dec 11 09:50:59 crc kubenswrapper[4881]: I1211 09:50:59.733372 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"0ae6707fd3cdd808adb6344e3f25bb421b3ca9971b8b6b09793435fa04ad9161"}
Dec 11 09:50:59 crc kubenswrapper[4881]: I1211 09:50:59.733788 4881 scope.go:117] "RemoveContainer" containerID="4411d7e1bcace55a5c503982d05efe329d9375b32a09a2062445c0d6f9e63a29"
Dec 11 09:51:00 crc kubenswrapper[4881]: I1211 09:51:00.749871 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c"}
Dec 11 09:52:59 crc kubenswrapper[4881]: I1211 09:52:59.396691 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 09:52:59 crc kubenswrapper[4881]: I1211 09:52:59.397135 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 09:53:29 crc kubenswrapper[4881]: I1211 09:53:29.397758 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 09:53:29 crc kubenswrapper[4881]: I1211 09:53:29.398303 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 09:53:54 crc kubenswrapper[4881]: I1211 09:53:54.545862 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dx5sx"]
Dec 11 09:53:54 crc kubenswrapper[4881]: E1211 09:53:54.547810 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" containerName="extract-utilities"
Dec 11 09:53:54 crc kubenswrapper[4881]: I1211 09:53:54.547930 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" containerName="extract-utilities"
Dec 11 09:53:54 crc kubenswrapper[4881]: E1211 09:53:54.547961 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" containerName="registry-server"
Dec 11 09:53:54 crc kubenswrapper[4881]: I1211 09:53:54.547969 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" containerName="registry-server"
Dec 11 09:53:54 crc kubenswrapper[4881]: E1211 09:53:54.547992 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" containerName="extract-content"
Dec 11 09:53:54 crc kubenswrapper[4881]: I1211 09:53:54.548000 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" containerName="extract-content"
podUID="cf59f823-b688-420e-9e5b-20f4441c9635" containerName="extract-content" Dec 11 09:53:54 crc kubenswrapper[4881]: I1211 09:53:54.548573 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf59f823-b688-420e-9e5b-20f4441c9635" containerName="registry-server" Dec 11 09:53:54 crc kubenswrapper[4881]: I1211 09:53:54.551677 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dx5sx" Dec 11 09:53:54 crc kubenswrapper[4881]: I1211 09:53:54.566685 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dx5sx"] Dec 11 09:53:54 crc kubenswrapper[4881]: I1211 09:53:54.647592 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c093c5c6-f018-484d-a227-4a03f10a9a14-catalog-content\") pod \"community-operators-dx5sx\" (UID: \"c093c5c6-f018-484d-a227-4a03f10a9a14\") " pod="openshift-marketplace/community-operators-dx5sx" Dec 11 09:53:54 crc kubenswrapper[4881]: I1211 09:53:54.647684 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c093c5c6-f018-484d-a227-4a03f10a9a14-utilities\") pod \"community-operators-dx5sx\" (UID: \"c093c5c6-f018-484d-a227-4a03f10a9a14\") " pod="openshift-marketplace/community-operators-dx5sx" Dec 11 09:53:54 crc kubenswrapper[4881]: I1211 09:53:54.648404 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5t6b\" (UniqueName: \"kubernetes.io/projected/c093c5c6-f018-484d-a227-4a03f10a9a14-kube-api-access-d5t6b\") pod \"community-operators-dx5sx\" (UID: \"c093c5c6-f018-484d-a227-4a03f10a9a14\") " pod="openshift-marketplace/community-operators-dx5sx" Dec 11 09:53:54 crc kubenswrapper[4881]: I1211 09:53:54.750367 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5t6b\" (UniqueName: \"kubernetes.io/projected/c093c5c6-f018-484d-a227-4a03f10a9a14-kube-api-access-d5t6b\") pod \"community-operators-dx5sx\" (UID: \"c093c5c6-f018-484d-a227-4a03f10a9a14\") " pod="openshift-marketplace/community-operators-dx5sx" Dec 11 09:53:54 crc kubenswrapper[4881]: I1211 09:53:54.750569 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c093c5c6-f018-484d-a227-4a03f10a9a14-catalog-content\") pod \"community-operators-dx5sx\" (UID: \"c093c5c6-f018-484d-a227-4a03f10a9a14\") " pod="openshift-marketplace/community-operators-dx5sx" Dec 11 09:53:54 crc kubenswrapper[4881]: I1211 09:53:54.750612 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c093c5c6-f018-484d-a227-4a03f10a9a14-utilities\") pod \"community-operators-dx5sx\" (UID: \"c093c5c6-f018-484d-a227-4a03f10a9a14\") " pod="openshift-marketplace/community-operators-dx5sx" Dec 11 09:53:54 crc kubenswrapper[4881]: I1211 09:53:54.751225 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c093c5c6-f018-484d-a227-4a03f10a9a14-utilities\") pod \"community-operators-dx5sx\" (UID: \"c093c5c6-f018-484d-a227-4a03f10a9a14\") " pod="openshift-marketplace/community-operators-dx5sx" Dec 11 09:53:54 crc kubenswrapper[4881]: I1211 09:53:54.752561 4881 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c093c5c6-f018-484d-a227-4a03f10a9a14-catalog-content\") pod \"community-operators-dx5sx\" (UID: \"c093c5c6-f018-484d-a227-4a03f10a9a14\") " pod="openshift-marketplace/community-operators-dx5sx" Dec 11 09:53:54 crc kubenswrapper[4881]: I1211 09:53:54.774360 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5t6b\" (UniqueName: \"kubernetes.io/projected/c093c5c6-f018-484d-a227-4a03f10a9a14-kube-api-access-d5t6b\") pod \"community-operators-dx5sx\" (UID: \"c093c5c6-f018-484d-a227-4a03f10a9a14\") " pod="openshift-marketplace/community-operators-dx5sx" Dec 11 09:53:54 crc kubenswrapper[4881]: I1211 09:53:54.880202 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dx5sx" Dec 11 09:53:55 crc kubenswrapper[4881]: I1211 09:53:55.892187 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dx5sx"] Dec 11 09:53:56 crc kubenswrapper[4881]: I1211 09:53:56.818505 4881 generic.go:334] "Generic (PLEG): container finished" podID="c093c5c6-f018-484d-a227-4a03f10a9a14" containerID="800639fda7d0ee89f37c7fdb2749005d80eaefaae23ebd7276736a5a7fc0528f" exitCode=0 Dec 11 09:53:56 crc kubenswrapper[4881]: I1211 09:53:56.818607 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dx5sx" event={"ID":"c093c5c6-f018-484d-a227-4a03f10a9a14","Type":"ContainerDied","Data":"800639fda7d0ee89f37c7fdb2749005d80eaefaae23ebd7276736a5a7fc0528f"} Dec 11 09:53:56 crc kubenswrapper[4881]: I1211 09:53:56.818822 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dx5sx" event={"ID":"c093c5c6-f018-484d-a227-4a03f10a9a14","Type":"ContainerStarted","Data":"1a07bdbfc13399bc475a4275d07a35d8d8092a4685417c979fe31b6ff5575185"} Dec 11 09:53:56 crc kubenswrapper[4881]: I1211 09:53:56.821472 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 09:53:58 crc kubenswrapper[4881]: I1211 09:53:58.843171 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dx5sx" event={"ID":"c093c5c6-f018-484d-a227-4a03f10a9a14","Type":"ContainerStarted","Data":"ee66dba1245c30a811318bf8d5a2a26fcda6ef99296a160f313f7562c83c1d2b"} Dec 11 09:53:59 crc kubenswrapper[4881]: I1211 09:53:59.396591 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 09:53:59 crc kubenswrapper[4881]: I1211 09:53:59.396650 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 09:53:59 crc kubenswrapper[4881]: I1211 09:53:59.396700 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 09:53:59 crc kubenswrapper[4881]: I1211 09:53:59.397640 4881 kuberuntime_manager.go:1027] "Message for Container of 
pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 09:53:59 crc kubenswrapper[4881]: I1211 09:53:59.397699 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" gracePeriod=600 Dec 11 09:53:59 crc kubenswrapper[4881]: I1211 09:53:59.858039 4881 generic.go:334] "Generic (PLEG): container finished" podID="c093c5c6-f018-484d-a227-4a03f10a9a14" containerID="ee66dba1245c30a811318bf8d5a2a26fcda6ef99296a160f313f7562c83c1d2b" exitCode=0 Dec 11 09:53:59 crc kubenswrapper[4881]: I1211 09:53:59.858134 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dx5sx" event={"ID":"c093c5c6-f018-484d-a227-4a03f10a9a14","Type":"ContainerDied","Data":"ee66dba1245c30a811318bf8d5a2a26fcda6ef99296a160f313f7562c83c1d2b"} Dec 11 09:53:59 crc kubenswrapper[4881]: E1211 09:53:59.948155 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:54:00 crc kubenswrapper[4881]: I1211 09:54:00.876085 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" exitCode=0 Dec 11 09:54:00 crc kubenswrapper[4881]: I1211 09:54:00.876442 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c"} Dec 11 09:54:00 crc kubenswrapper[4881]: I1211 09:54:00.876495 4881 scope.go:117] "RemoveContainer" containerID="0ae6707fd3cdd808adb6344e3f25bb421b3ca9971b8b6b09793435fa04ad9161" Dec 11 09:54:00 crc kubenswrapper[4881]: I1211 09:54:00.877446 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:54:00 crc kubenswrapper[4881]: E1211 09:54:00.877944 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:54:01 crc kubenswrapper[4881]: I1211 09:54:01.889559 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dx5sx" event={"ID":"c093c5c6-f018-484d-a227-4a03f10a9a14","Type":"ContainerStarted","Data":"402b80dfb7eea534a1f9ab97501c012bcff97eee9f4a8ae39abf56104549e0cf"} Dec 11 09:54:01 
Dec 11 09:54:01 crc kubenswrapper[4881]: I1211 09:54:01.918997 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dx5sx" podStartSLOduration=4.034885119 podStartE2EDuration="7.918195281s" podCreationTimestamp="2025-12-11 09:53:54 +0000 UTC" firstStartedPulling="2025-12-11 09:53:56.820659138 +0000 UTC m=+5885.198027855" lastFinishedPulling="2025-12-11 09:54:00.7039693 +0000 UTC m=+5889.081338017" observedRunningTime="2025-12-11 09:54:01.909869058 +0000 UTC m=+5890.287237765" watchObservedRunningTime="2025-12-11 09:54:01.918195281 +0000 UTC m=+5890.295563978"
Dec 11 09:54:04 crc kubenswrapper[4881]: I1211 09:54:04.880736 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dx5sx"
Dec 11 09:54:04 crc kubenswrapper[4881]: I1211 09:54:04.881245 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dx5sx"
Dec 11 09:54:04 crc kubenswrapper[4881]: I1211 09:54:04.935022 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dx5sx"
Dec 11 09:54:10 crc kubenswrapper[4881]: I1211 09:54:10.598213 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tvcqh"]
Dec 11 09:54:10 crc kubenswrapper[4881]: I1211 09:54:10.601773 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tvcqh"
Dec 11 09:54:10 crc kubenswrapper[4881]: I1211 09:54:10.619472 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tvcqh"]
Dec 11 09:54:10 crc kubenswrapper[4881]: I1211 09:54:10.756234 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/977c078a-bb13-4e6c-994a-48a5bd2fa848-catalog-content\") pod \"redhat-operators-tvcqh\" (UID: \"977c078a-bb13-4e6c-994a-48a5bd2fa848\") " pod="openshift-marketplace/redhat-operators-tvcqh"
Dec 11 09:54:10 crc kubenswrapper[4881]: I1211 09:54:10.756371 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fg82w\" (UniqueName: \"kubernetes.io/projected/977c078a-bb13-4e6c-994a-48a5bd2fa848-kube-api-access-fg82w\") pod \"redhat-operators-tvcqh\" (UID: \"977c078a-bb13-4e6c-994a-48a5bd2fa848\") " pod="openshift-marketplace/redhat-operators-tvcqh"
Dec 11 09:54:10 crc kubenswrapper[4881]: I1211 09:54:10.756567 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/977c078a-bb13-4e6c-994a-48a5bd2fa848-utilities\") pod \"redhat-operators-tvcqh\" (UID: \"977c078a-bb13-4e6c-994a-48a5bd2fa848\") " pod="openshift-marketplace/redhat-operators-tvcqh"
Dec 11 09:54:10 crc kubenswrapper[4881]: I1211 09:54:10.859628 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/977c078a-bb13-4e6c-994a-48a5bd2fa848-catalog-content\") pod \"redhat-operators-tvcqh\" (UID: \"977c078a-bb13-4e6c-994a-48a5bd2fa848\") " pod="openshift-marketplace/redhat-operators-tvcqh"
Dec 11 09:54:10 crc kubenswrapper[4881]: I1211 09:54:10.859759 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fg82w\" (UniqueName: \"kubernetes.io/projected/977c078a-bb13-4e6c-994a-48a5bd2fa848-kube-api-access-fg82w\") pod \"redhat-operators-tvcqh\" (UID: \"977c078a-bb13-4e6c-994a-48a5bd2fa848\") " pod="openshift-marketplace/redhat-operators-tvcqh"
Dec 11 09:54:10 crc kubenswrapper[4881]: I1211 09:54:10.859840 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/977c078a-bb13-4e6c-994a-48a5bd2fa848-utilities\") pod \"redhat-operators-tvcqh\" (UID: \"977c078a-bb13-4e6c-994a-48a5bd2fa848\") " pod="openshift-marketplace/redhat-operators-tvcqh"
Dec 11 09:54:10 crc kubenswrapper[4881]: I1211 09:54:10.860148 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/977c078a-bb13-4e6c-994a-48a5bd2fa848-catalog-content\") pod \"redhat-operators-tvcqh\" (UID: \"977c078a-bb13-4e6c-994a-48a5bd2fa848\") " pod="openshift-marketplace/redhat-operators-tvcqh"
Dec 11 09:54:10 crc kubenswrapper[4881]: I1211 09:54:10.860309 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/977c078a-bb13-4e6c-994a-48a5bd2fa848-utilities\") pod \"redhat-operators-tvcqh\" (UID: \"977c078a-bb13-4e6c-994a-48a5bd2fa848\") " pod="openshift-marketplace/redhat-operators-tvcqh"
Dec 11 09:54:10 crc kubenswrapper[4881]: I1211 09:54:10.892908 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fg82w\" (UniqueName: \"kubernetes.io/projected/977c078a-bb13-4e6c-994a-48a5bd2fa848-kube-api-access-fg82w\") pod \"redhat-operators-tvcqh\" (UID: \"977c078a-bb13-4e6c-994a-48a5bd2fa848\") " pod="openshift-marketplace/redhat-operators-tvcqh"
Dec 11 09:54:10 crc kubenswrapper[4881]: I1211 09:54:10.926710 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tvcqh"
Dec 11 09:54:11 crc kubenswrapper[4881]: I1211 09:54:11.772614 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tvcqh"]
Dec 11 09:54:11 crc kubenswrapper[4881]: I1211 09:54:11.995041 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvcqh" event={"ID":"977c078a-bb13-4e6c-994a-48a5bd2fa848","Type":"ContainerStarted","Data":"39b072ca70ce9f08779f53b370a7675dc0fa258428b225b244f1bfa5598f211e"}
Dec 11 09:54:13 crc kubenswrapper[4881]: I1211 09:54:13.013798 4881 generic.go:334] "Generic (PLEG): container finished" podID="977c078a-bb13-4e6c-994a-48a5bd2fa848" containerID="20c7b15e611b6e3704bf47f268bfa53889bc6096784ee217e63eed7c3f238452" exitCode=0
Dec 11 09:54:13 crc kubenswrapper[4881]: I1211 09:54:13.022763 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvcqh" event={"ID":"977c078a-bb13-4e6c-994a-48a5bd2fa848","Type":"ContainerDied","Data":"20c7b15e611b6e3704bf47f268bfa53889bc6096784ee217e63eed7c3f238452"}
Dec 11 09:54:14 crc kubenswrapper[4881]: I1211 09:54:14.962601 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-dx5sx"
Dec 11 09:54:15 crc kubenswrapper[4881]: I1211 09:54:15.042288 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvcqh" event={"ID":"977c078a-bb13-4e6c-994a-48a5bd2fa848","Type":"ContainerStarted","Data":"4c1403fd15e2a6be22fc90609ab4a47f5e7549cd45b4955509a51a5f1752d1c4"}
Dec 11 09:54:15 crc kubenswrapper[4881]: I1211 09:54:15.560588 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dx5sx"]
Dec 11 09:54:15 crc kubenswrapper[4881]: I1211 09:54:15.561136 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-dx5sx" podUID="c093c5c6-f018-484d-a227-4a03f10a9a14" containerName="registry-server" containerID="cri-o://402b80dfb7eea534a1f9ab97501c012bcff97eee9f4a8ae39abf56104549e0cf" gracePeriod=2
Dec 11 09:54:16 crc kubenswrapper[4881]: I1211 09:54:16.015013 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c"
Dec 11 09:54:16 crc kubenswrapper[4881]: E1211 09:54:16.016104 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 09:54:17 crc kubenswrapper[4881]: I1211 09:54:17.064776 4881 generic.go:334] "Generic (PLEG): container finished" podID="c093c5c6-f018-484d-a227-4a03f10a9a14" containerID="402b80dfb7eea534a1f9ab97501c012bcff97eee9f4a8ae39abf56104549e0cf" exitCode=0
Dec 11 09:54:17 crc kubenswrapper[4881]: I1211 09:54:17.064803 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dx5sx" event={"ID":"c093c5c6-f018-484d-a227-4a03f10a9a14","Type":"ContainerDied","Data":"402b80dfb7eea534a1f9ab97501c012bcff97eee9f4a8ae39abf56104549e0cf"}
Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:18.741981 4881 util.go:48] "No ready sandbox for pod 
can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dx5sx" Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:18.886230 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c093c5c6-f018-484d-a227-4a03f10a9a14-utilities\") pod \"c093c5c6-f018-484d-a227-4a03f10a9a14\" (UID: \"c093c5c6-f018-484d-a227-4a03f10a9a14\") " Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:18.886680 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d5t6b\" (UniqueName: \"kubernetes.io/projected/c093c5c6-f018-484d-a227-4a03f10a9a14-kube-api-access-d5t6b\") pod \"c093c5c6-f018-484d-a227-4a03f10a9a14\" (UID: \"c093c5c6-f018-484d-a227-4a03f10a9a14\") " Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:18.886736 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c093c5c6-f018-484d-a227-4a03f10a9a14-catalog-content\") pod \"c093c5c6-f018-484d-a227-4a03f10a9a14\" (UID: \"c093c5c6-f018-484d-a227-4a03f10a9a14\") " Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:18.887143 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c093c5c6-f018-484d-a227-4a03f10a9a14-utilities" (OuterVolumeSpecName: "utilities") pod "c093c5c6-f018-484d-a227-4a03f10a9a14" (UID: "c093c5c6-f018-484d-a227-4a03f10a9a14"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:18.887748 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c093c5c6-f018-484d-a227-4a03f10a9a14-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:18.902686 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c093c5c6-f018-484d-a227-4a03f10a9a14-kube-api-access-d5t6b" (OuterVolumeSpecName: "kube-api-access-d5t6b") pod "c093c5c6-f018-484d-a227-4a03f10a9a14" (UID: "c093c5c6-f018-484d-a227-4a03f10a9a14"). InnerVolumeSpecName "kube-api-access-d5t6b". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:18.929726 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c093c5c6-f018-484d-a227-4a03f10a9a14-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c093c5c6-f018-484d-a227-4a03f10a9a14" (UID: "c093c5c6-f018-484d-a227-4a03f10a9a14"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:18.990873 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c093c5c6-f018-484d-a227-4a03f10a9a14-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:18.990920 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d5t6b\" (UniqueName: \"kubernetes.io/projected/c093c5c6-f018-484d-a227-4a03f10a9a14-kube-api-access-d5t6b\") on node \"crc\" DevicePath \"\"" Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:19.093821 4881 generic.go:334] "Generic (PLEG): container finished" podID="977c078a-bb13-4e6c-994a-48a5bd2fa848" containerID="4c1403fd15e2a6be22fc90609ab4a47f5e7549cd45b4955509a51a5f1752d1c4" exitCode=0 Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:19.093882 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvcqh" event={"ID":"977c078a-bb13-4e6c-994a-48a5bd2fa848","Type":"ContainerDied","Data":"4c1403fd15e2a6be22fc90609ab4a47f5e7549cd45b4955509a51a5f1752d1c4"} Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:19.103513 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dx5sx" event={"ID":"c093c5c6-f018-484d-a227-4a03f10a9a14","Type":"ContainerDied","Data":"1a07bdbfc13399bc475a4275d07a35d8d8092a4685417c979fe31b6ff5575185"} Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:19.103559 4881 scope.go:117] "RemoveContainer" containerID="402b80dfb7eea534a1f9ab97501c012bcff97eee9f4a8ae39abf56104549e0cf" Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:19.103917 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dx5sx" Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:19.149860 4881 scope.go:117] "RemoveContainer" containerID="ee66dba1245c30a811318bf8d5a2a26fcda6ef99296a160f313f7562c83c1d2b" Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:19.192576 4881 scope.go:117] "RemoveContainer" containerID="800639fda7d0ee89f37c7fdb2749005d80eaefaae23ebd7276736a5a7fc0528f" Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:19.202493 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dx5sx"] Dec 11 09:54:19 crc kubenswrapper[4881]: I1211 09:54:19.220058 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-dx5sx"] Dec 11 09:54:20 crc kubenswrapper[4881]: I1211 09:54:20.118120 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvcqh" event={"ID":"977c078a-bb13-4e6c-994a-48a5bd2fa848","Type":"ContainerStarted","Data":"440db367b56a84c94950929a05a3bf53be243b40913cf0c7d07121a76a385ba7"} Dec 11 09:54:20 crc kubenswrapper[4881]: I1211 09:54:20.147656 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tvcqh" podStartSLOduration=3.363092431 podStartE2EDuration="10.147631725s" podCreationTimestamp="2025-12-11 09:54:10 +0000 UTC" firstStartedPulling="2025-12-11 09:54:13.020965153 +0000 UTC m=+5901.398333850" lastFinishedPulling="2025-12-11 09:54:19.805504447 +0000 UTC m=+5908.182873144" observedRunningTime="2025-12-11 09:54:20.136583624 +0000 UTC m=+5908.513952321" watchObservedRunningTime="2025-12-11 09:54:20.147631725 +0000 UTC m=+5908.525000422" Dec 11 09:54:20 crc kubenswrapper[4881]: I1211 09:54:20.927221 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tvcqh" Dec 11 09:54:20 crc kubenswrapper[4881]: I1211 09:54:20.927303 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tvcqh" Dec 11 09:54:21 crc kubenswrapper[4881]: I1211 09:54:21.021022 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c093c5c6-f018-484d-a227-4a03f10a9a14" path="/var/lib/kubelet/pods/c093c5c6-f018-484d-a227-4a03f10a9a14/volumes" Dec 11 09:54:21 crc kubenswrapper[4881]: I1211 09:54:21.977767 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvcqh" podUID="977c078a-bb13-4e6c-994a-48a5bd2fa848" containerName="registry-server" probeResult="failure" output=< Dec 11 09:54:21 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 09:54:21 crc kubenswrapper[4881]: > Dec 11 09:54:27 crc kubenswrapper[4881]: I1211 09:54:27.005654 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:54:27 crc kubenswrapper[4881]: E1211 09:54:27.006467 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:54:31 crc kubenswrapper[4881]: I1211 09:54:31.988779 4881 prober.go:107] "Probe failed" 
probeType="Startup" pod="openshift-marketplace/redhat-operators-tvcqh" podUID="977c078a-bb13-4e6c-994a-48a5bd2fa848" containerName="registry-server" probeResult="failure" output=< Dec 11 09:54:31 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 09:54:31 crc kubenswrapper[4881]: > Dec 11 09:54:38 crc kubenswrapper[4881]: I1211 09:54:38.006175 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:54:38 crc kubenswrapper[4881]: E1211 09:54:38.006885 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:54:40 crc kubenswrapper[4881]: I1211 09:54:40.977756 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tvcqh" Dec 11 09:54:41 crc kubenswrapper[4881]: I1211 09:54:41.044740 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tvcqh" Dec 11 09:54:41 crc kubenswrapper[4881]: I1211 09:54:41.787025 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tvcqh"] Dec 11 09:54:42 crc kubenswrapper[4881]: I1211 09:54:42.352316 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tvcqh" podUID="977c078a-bb13-4e6c-994a-48a5bd2fa848" containerName="registry-server" containerID="cri-o://440db367b56a84c94950929a05a3bf53be243b40913cf0c7d07121a76a385ba7" gracePeriod=2 Dec 11 09:54:43 crc kubenswrapper[4881]: I1211 09:54:43.369477 4881 generic.go:334] "Generic (PLEG): container finished" podID="977c078a-bb13-4e6c-994a-48a5bd2fa848" containerID="440db367b56a84c94950929a05a3bf53be243b40913cf0c7d07121a76a385ba7" exitCode=0 Dec 11 09:54:43 crc kubenswrapper[4881]: I1211 09:54:43.369573 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvcqh" event={"ID":"977c078a-bb13-4e6c-994a-48a5bd2fa848","Type":"ContainerDied","Data":"440db367b56a84c94950929a05a3bf53be243b40913cf0c7d07121a76a385ba7"} Dec 11 09:54:43 crc kubenswrapper[4881]: I1211 09:54:43.369959 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvcqh" event={"ID":"977c078a-bb13-4e6c-994a-48a5bd2fa848","Type":"ContainerDied","Data":"39b072ca70ce9f08779f53b370a7675dc0fa258428b225b244f1bfa5598f211e"} Dec 11 09:54:43 crc kubenswrapper[4881]: I1211 09:54:43.370379 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39b072ca70ce9f08779f53b370a7675dc0fa258428b225b244f1bfa5598f211e" Dec 11 09:54:43 crc kubenswrapper[4881]: I1211 09:54:43.445378 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tvcqh" Dec 11 09:54:43 crc kubenswrapper[4881]: I1211 09:54:43.505482 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fg82w\" (UniqueName: \"kubernetes.io/projected/977c078a-bb13-4e6c-994a-48a5bd2fa848-kube-api-access-fg82w\") pod \"977c078a-bb13-4e6c-994a-48a5bd2fa848\" (UID: \"977c078a-bb13-4e6c-994a-48a5bd2fa848\") " Dec 11 09:54:43 crc kubenswrapper[4881]: I1211 09:54:43.505730 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/977c078a-bb13-4e6c-994a-48a5bd2fa848-catalog-content\") pod \"977c078a-bb13-4e6c-994a-48a5bd2fa848\" (UID: \"977c078a-bb13-4e6c-994a-48a5bd2fa848\") " Dec 11 09:54:43 crc kubenswrapper[4881]: I1211 09:54:43.505799 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/977c078a-bb13-4e6c-994a-48a5bd2fa848-utilities\") pod \"977c078a-bb13-4e6c-994a-48a5bd2fa848\" (UID: \"977c078a-bb13-4e6c-994a-48a5bd2fa848\") " Dec 11 09:54:43 crc kubenswrapper[4881]: I1211 09:54:43.506495 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/977c078a-bb13-4e6c-994a-48a5bd2fa848-utilities" (OuterVolumeSpecName: "utilities") pod "977c078a-bb13-4e6c-994a-48a5bd2fa848" (UID: "977c078a-bb13-4e6c-994a-48a5bd2fa848"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:54:43 crc kubenswrapper[4881]: I1211 09:54:43.506972 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/977c078a-bb13-4e6c-994a-48a5bd2fa848-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:54:43 crc kubenswrapper[4881]: I1211 09:54:43.511480 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/977c078a-bb13-4e6c-994a-48a5bd2fa848-kube-api-access-fg82w" (OuterVolumeSpecName: "kube-api-access-fg82w") pod "977c078a-bb13-4e6c-994a-48a5bd2fa848" (UID: "977c078a-bb13-4e6c-994a-48a5bd2fa848"). InnerVolumeSpecName "kube-api-access-fg82w". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:54:43 crc kubenswrapper[4881]: I1211 09:54:43.608738 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fg82w\" (UniqueName: \"kubernetes.io/projected/977c078a-bb13-4e6c-994a-48a5bd2fa848-kube-api-access-fg82w\") on node \"crc\" DevicePath \"\"" Dec 11 09:54:43 crc kubenswrapper[4881]: I1211 09:54:43.625147 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/977c078a-bb13-4e6c-994a-48a5bd2fa848-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "977c078a-bb13-4e6c-994a-48a5bd2fa848" (UID: "977c078a-bb13-4e6c-994a-48a5bd2fa848"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:54:43 crc kubenswrapper[4881]: I1211 09:54:43.711200 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/977c078a-bb13-4e6c-994a-48a5bd2fa848-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:54:44 crc kubenswrapper[4881]: I1211 09:54:44.380896 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tvcqh" Dec 11 09:54:44 crc kubenswrapper[4881]: I1211 09:54:44.417388 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tvcqh"] Dec 11 09:54:44 crc kubenswrapper[4881]: I1211 09:54:44.429756 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tvcqh"] Dec 11 09:54:45 crc kubenswrapper[4881]: I1211 09:54:45.018264 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="977c078a-bb13-4e6c-994a-48a5bd2fa848" path="/var/lib/kubelet/pods/977c078a-bb13-4e6c-994a-48a5bd2fa848/volumes" Dec 11 09:54:53 crc kubenswrapper[4881]: I1211 09:54:53.013428 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:54:53 crc kubenswrapper[4881]: E1211 09:54:53.014325 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.254415 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-klptq"] Dec 11 09:55:04 crc kubenswrapper[4881]: E1211 09:55:04.255365 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="977c078a-bb13-4e6c-994a-48a5bd2fa848" containerName="registry-server" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.255379 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="977c078a-bb13-4e6c-994a-48a5bd2fa848" containerName="registry-server" Dec 11 09:55:04 crc kubenswrapper[4881]: E1211 09:55:04.255400 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c093c5c6-f018-484d-a227-4a03f10a9a14" containerName="registry-server" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.255406 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="c093c5c6-f018-484d-a227-4a03f10a9a14" containerName="registry-server" Dec 11 09:55:04 crc kubenswrapper[4881]: E1211 09:55:04.255416 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="977c078a-bb13-4e6c-994a-48a5bd2fa848" containerName="extract-utilities" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.255422 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="977c078a-bb13-4e6c-994a-48a5bd2fa848" containerName="extract-utilities" Dec 11 09:55:04 crc kubenswrapper[4881]: E1211 09:55:04.255429 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c093c5c6-f018-484d-a227-4a03f10a9a14" containerName="extract-content" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.255437 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="c093c5c6-f018-484d-a227-4a03f10a9a14" containerName="extract-content" Dec 11 09:55:04 crc kubenswrapper[4881]: E1211 09:55:04.255456 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="977c078a-bb13-4e6c-994a-48a5bd2fa848" containerName="extract-content" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.255462 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="977c078a-bb13-4e6c-994a-48a5bd2fa848" containerName="extract-content" Dec 11 09:55:04 crc kubenswrapper[4881]: E1211 
09:55:04.255522 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c093c5c6-f018-484d-a227-4a03f10a9a14" containerName="extract-utilities" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.255527 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="c093c5c6-f018-484d-a227-4a03f10a9a14" containerName="extract-utilities" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.255739 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="977c078a-bb13-4e6c-994a-48a5bd2fa848" containerName="registry-server" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.255766 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="c093c5c6-f018-484d-a227-4a03f10a9a14" containerName="registry-server" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.257471 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-klptq" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.268095 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-klptq"] Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.442213 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtgz9\" (UniqueName: \"kubernetes.io/projected/5596eb3b-2966-4527-9742-7d8f19c934c0-kube-api-access-jtgz9\") pod \"redhat-marketplace-klptq\" (UID: \"5596eb3b-2966-4527-9742-7d8f19c934c0\") " pod="openshift-marketplace/redhat-marketplace-klptq" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.442767 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5596eb3b-2966-4527-9742-7d8f19c934c0-utilities\") pod \"redhat-marketplace-klptq\" (UID: \"5596eb3b-2966-4527-9742-7d8f19c934c0\") " pod="openshift-marketplace/redhat-marketplace-klptq" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.442862 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5596eb3b-2966-4527-9742-7d8f19c934c0-catalog-content\") pod \"redhat-marketplace-klptq\" (UID: \"5596eb3b-2966-4527-9742-7d8f19c934c0\") " pod="openshift-marketplace/redhat-marketplace-klptq" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.545139 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5596eb3b-2966-4527-9742-7d8f19c934c0-utilities\") pod \"redhat-marketplace-klptq\" (UID: \"5596eb3b-2966-4527-9742-7d8f19c934c0\") " pod="openshift-marketplace/redhat-marketplace-klptq" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.545278 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5596eb3b-2966-4527-9742-7d8f19c934c0-catalog-content\") pod \"redhat-marketplace-klptq\" (UID: \"5596eb3b-2966-4527-9742-7d8f19c934c0\") " pod="openshift-marketplace/redhat-marketplace-klptq" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.545424 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtgz9\" (UniqueName: \"kubernetes.io/projected/5596eb3b-2966-4527-9742-7d8f19c934c0-kube-api-access-jtgz9\") pod \"redhat-marketplace-klptq\" (UID: \"5596eb3b-2966-4527-9742-7d8f19c934c0\") " pod="openshift-marketplace/redhat-marketplace-klptq" Dec 
11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.545840 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5596eb3b-2966-4527-9742-7d8f19c934c0-utilities\") pod \"redhat-marketplace-klptq\" (UID: \"5596eb3b-2966-4527-9742-7d8f19c934c0\") " pod="openshift-marketplace/redhat-marketplace-klptq" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.545871 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5596eb3b-2966-4527-9742-7d8f19c934c0-catalog-content\") pod \"redhat-marketplace-klptq\" (UID: \"5596eb3b-2966-4527-9742-7d8f19c934c0\") " pod="openshift-marketplace/redhat-marketplace-klptq" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.578370 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtgz9\" (UniqueName: \"kubernetes.io/projected/5596eb3b-2966-4527-9742-7d8f19c934c0-kube-api-access-jtgz9\") pod \"redhat-marketplace-klptq\" (UID: \"5596eb3b-2966-4527-9742-7d8f19c934c0\") " pod="openshift-marketplace/redhat-marketplace-klptq" Dec 11 09:55:04 crc kubenswrapper[4881]: I1211 09:55:04.639692 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-klptq" Dec 11 09:55:05 crc kubenswrapper[4881]: I1211 09:55:05.008430 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:55:05 crc kubenswrapper[4881]: E1211 09:55:05.009051 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:55:05 crc kubenswrapper[4881]: I1211 09:55:05.152175 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-klptq"] Dec 11 09:55:05 crc kubenswrapper[4881]: I1211 09:55:05.597385 4881 generic.go:334] "Generic (PLEG): container finished" podID="5596eb3b-2966-4527-9742-7d8f19c934c0" containerID="529bb210999596751304986ff13dad2bd277dc3fe37fc3720e4b5b77c9c954d5" exitCode=0 Dec 11 09:55:05 crc kubenswrapper[4881]: I1211 09:55:05.597460 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-klptq" event={"ID":"5596eb3b-2966-4527-9742-7d8f19c934c0","Type":"ContainerDied","Data":"529bb210999596751304986ff13dad2bd277dc3fe37fc3720e4b5b77c9c954d5"} Dec 11 09:55:05 crc kubenswrapper[4881]: I1211 09:55:05.597691 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-klptq" event={"ID":"5596eb3b-2966-4527-9742-7d8f19c934c0","Type":"ContainerStarted","Data":"e720b820f9ad5a1e550daa4935a669aae6e2abba2dddd89d461d2fc40fd902dd"} Dec 11 09:55:07 crc kubenswrapper[4881]: I1211 09:55:07.621375 4881 generic.go:334] "Generic (PLEG): container finished" podID="5596eb3b-2966-4527-9742-7d8f19c934c0" containerID="0dd0791252e7c933c419e8a7c15f2ef8f34e285b3a2f12a5631dcdbbb99d0246" exitCode=0 Dec 11 09:55:07 crc kubenswrapper[4881]: I1211 09:55:07.621457 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-klptq" 
event={"ID":"5596eb3b-2966-4527-9742-7d8f19c934c0","Type":"ContainerDied","Data":"0dd0791252e7c933c419e8a7c15f2ef8f34e285b3a2f12a5631dcdbbb99d0246"} Dec 11 09:55:09 crc kubenswrapper[4881]: I1211 09:55:09.649208 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-klptq" event={"ID":"5596eb3b-2966-4527-9742-7d8f19c934c0","Type":"ContainerStarted","Data":"26f862e5ff7bba13b9d3ee53c06feba4cfb8ef134d7a2e7db9ceb88835bc9109"} Dec 11 09:55:09 crc kubenswrapper[4881]: I1211 09:55:09.667992 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-klptq" podStartSLOduration=2.318324605 podStartE2EDuration="5.667977202s" podCreationTimestamp="2025-12-11 09:55:04 +0000 UTC" firstStartedPulling="2025-12-11 09:55:05.601184192 +0000 UTC m=+5953.978552879" lastFinishedPulling="2025-12-11 09:55:08.950836769 +0000 UTC m=+5957.328205476" observedRunningTime="2025-12-11 09:55:09.665392699 +0000 UTC m=+5958.042761396" watchObservedRunningTime="2025-12-11 09:55:09.667977202 +0000 UTC m=+5958.045345899" Dec 11 09:55:14 crc kubenswrapper[4881]: I1211 09:55:14.640578 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-klptq" Dec 11 09:55:14 crc kubenswrapper[4881]: I1211 09:55:14.641318 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-klptq" Dec 11 09:55:14 crc kubenswrapper[4881]: I1211 09:55:14.717435 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-klptq" Dec 11 09:55:14 crc kubenswrapper[4881]: I1211 09:55:14.779737 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-klptq" Dec 11 09:55:14 crc kubenswrapper[4881]: I1211 09:55:14.957534 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-klptq"] Dec 11 09:55:16 crc kubenswrapper[4881]: I1211 09:55:16.722313 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-klptq" podUID="5596eb3b-2966-4527-9742-7d8f19c934c0" containerName="registry-server" containerID="cri-o://26f862e5ff7bba13b9d3ee53c06feba4cfb8ef134d7a2e7db9ceb88835bc9109" gracePeriod=2 Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.298517 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-klptq" Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.466453 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtgz9\" (UniqueName: \"kubernetes.io/projected/5596eb3b-2966-4527-9742-7d8f19c934c0-kube-api-access-jtgz9\") pod \"5596eb3b-2966-4527-9742-7d8f19c934c0\" (UID: \"5596eb3b-2966-4527-9742-7d8f19c934c0\") " Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.467093 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5596eb3b-2966-4527-9742-7d8f19c934c0-utilities\") pod \"5596eb3b-2966-4527-9742-7d8f19c934c0\" (UID: \"5596eb3b-2966-4527-9742-7d8f19c934c0\") " Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.467156 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5596eb3b-2966-4527-9742-7d8f19c934c0-catalog-content\") pod \"5596eb3b-2966-4527-9742-7d8f19c934c0\" (UID: \"5596eb3b-2966-4527-9742-7d8f19c934c0\") " Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.468303 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5596eb3b-2966-4527-9742-7d8f19c934c0-utilities" (OuterVolumeSpecName: "utilities") pod "5596eb3b-2966-4527-9742-7d8f19c934c0" (UID: "5596eb3b-2966-4527-9742-7d8f19c934c0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.472875 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5596eb3b-2966-4527-9742-7d8f19c934c0-kube-api-access-jtgz9" (OuterVolumeSpecName: "kube-api-access-jtgz9") pod "5596eb3b-2966-4527-9742-7d8f19c934c0" (UID: "5596eb3b-2966-4527-9742-7d8f19c934c0"). InnerVolumeSpecName "kube-api-access-jtgz9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.492270 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5596eb3b-2966-4527-9742-7d8f19c934c0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5596eb3b-2966-4527-9742-7d8f19c934c0" (UID: "5596eb3b-2966-4527-9742-7d8f19c934c0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.569611 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5596eb3b-2966-4527-9742-7d8f19c934c0-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.569652 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5596eb3b-2966-4527-9742-7d8f19c934c0-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.569668 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtgz9\" (UniqueName: \"kubernetes.io/projected/5596eb3b-2966-4527-9742-7d8f19c934c0-kube-api-access-jtgz9\") on node \"crc\" DevicePath \"\"" Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.734028 4881 generic.go:334] "Generic (PLEG): container finished" podID="5596eb3b-2966-4527-9742-7d8f19c934c0" containerID="26f862e5ff7bba13b9d3ee53c06feba4cfb8ef134d7a2e7db9ceb88835bc9109" exitCode=0 Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.734071 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-klptq" event={"ID":"5596eb3b-2966-4527-9742-7d8f19c934c0","Type":"ContainerDied","Data":"26f862e5ff7bba13b9d3ee53c06feba4cfb8ef134d7a2e7db9ceb88835bc9109"} Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.734095 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-klptq" event={"ID":"5596eb3b-2966-4527-9742-7d8f19c934c0","Type":"ContainerDied","Data":"e720b820f9ad5a1e550daa4935a669aae6e2abba2dddd89d461d2fc40fd902dd"} Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.734112 4881 scope.go:117] "RemoveContainer" containerID="26f862e5ff7bba13b9d3ee53c06feba4cfb8ef134d7a2e7db9ceb88835bc9109" Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.734125 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-klptq" Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.770263 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-klptq"] Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.779485 4881 scope.go:117] "RemoveContainer" containerID="0dd0791252e7c933c419e8a7c15f2ef8f34e285b3a2f12a5631dcdbbb99d0246" Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.781646 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-klptq"] Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.804887 4881 scope.go:117] "RemoveContainer" containerID="529bb210999596751304986ff13dad2bd277dc3fe37fc3720e4b5b77c9c954d5" Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.851453 4881 scope.go:117] "RemoveContainer" containerID="26f862e5ff7bba13b9d3ee53c06feba4cfb8ef134d7a2e7db9ceb88835bc9109" Dec 11 09:55:17 crc kubenswrapper[4881]: E1211 09:55:17.852409 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26f862e5ff7bba13b9d3ee53c06feba4cfb8ef134d7a2e7db9ceb88835bc9109\": container with ID starting with 26f862e5ff7bba13b9d3ee53c06feba4cfb8ef134d7a2e7db9ceb88835bc9109 not found: ID does not exist" containerID="26f862e5ff7bba13b9d3ee53c06feba4cfb8ef134d7a2e7db9ceb88835bc9109" Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.852447 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26f862e5ff7bba13b9d3ee53c06feba4cfb8ef134d7a2e7db9ceb88835bc9109"} err="failed to get container status \"26f862e5ff7bba13b9d3ee53c06feba4cfb8ef134d7a2e7db9ceb88835bc9109\": rpc error: code = NotFound desc = could not find container \"26f862e5ff7bba13b9d3ee53c06feba4cfb8ef134d7a2e7db9ceb88835bc9109\": container with ID starting with 26f862e5ff7bba13b9d3ee53c06feba4cfb8ef134d7a2e7db9ceb88835bc9109 not found: ID does not exist" Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.852469 4881 scope.go:117] "RemoveContainer" containerID="0dd0791252e7c933c419e8a7c15f2ef8f34e285b3a2f12a5631dcdbbb99d0246" Dec 11 09:55:17 crc kubenswrapper[4881]: E1211 09:55:17.852913 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0dd0791252e7c933c419e8a7c15f2ef8f34e285b3a2f12a5631dcdbbb99d0246\": container with ID starting with 0dd0791252e7c933c419e8a7c15f2ef8f34e285b3a2f12a5631dcdbbb99d0246 not found: ID does not exist" containerID="0dd0791252e7c933c419e8a7c15f2ef8f34e285b3a2f12a5631dcdbbb99d0246" Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.853022 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0dd0791252e7c933c419e8a7c15f2ef8f34e285b3a2f12a5631dcdbbb99d0246"} err="failed to get container status \"0dd0791252e7c933c419e8a7c15f2ef8f34e285b3a2f12a5631dcdbbb99d0246\": rpc error: code = NotFound desc = could not find container \"0dd0791252e7c933c419e8a7c15f2ef8f34e285b3a2f12a5631dcdbbb99d0246\": container with ID starting with 0dd0791252e7c933c419e8a7c15f2ef8f34e285b3a2f12a5631dcdbbb99d0246 not found: ID does not exist" Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.853106 4881 scope.go:117] "RemoveContainer" containerID="529bb210999596751304986ff13dad2bd277dc3fe37fc3720e4b5b77c9c954d5" Dec 11 09:55:17 crc kubenswrapper[4881]: E1211 09:55:17.853500 4881 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"529bb210999596751304986ff13dad2bd277dc3fe37fc3720e4b5b77c9c954d5\": container with ID starting with 529bb210999596751304986ff13dad2bd277dc3fe37fc3720e4b5b77c9c954d5 not found: ID does not exist" containerID="529bb210999596751304986ff13dad2bd277dc3fe37fc3720e4b5b77c9c954d5" Dec 11 09:55:17 crc kubenswrapper[4881]: I1211 09:55:17.853534 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"529bb210999596751304986ff13dad2bd277dc3fe37fc3720e4b5b77c9c954d5"} err="failed to get container status \"529bb210999596751304986ff13dad2bd277dc3fe37fc3720e4b5b77c9c954d5\": rpc error: code = NotFound desc = could not find container \"529bb210999596751304986ff13dad2bd277dc3fe37fc3720e4b5b77c9c954d5\": container with ID starting with 529bb210999596751304986ff13dad2bd277dc3fe37fc3720e4b5b77c9c954d5 not found: ID does not exist" Dec 11 09:55:18 crc kubenswrapper[4881]: I1211 09:55:18.006076 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:55:18 crc kubenswrapper[4881]: E1211 09:55:18.006539 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:55:19 crc kubenswrapper[4881]: I1211 09:55:19.017734 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5596eb3b-2966-4527-9742-7d8f19c934c0" path="/var/lib/kubelet/pods/5596eb3b-2966-4527-9742-7d8f19c934c0/volumes" Dec 11 09:55:32 crc kubenswrapper[4881]: I1211 09:55:32.005692 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:55:32 crc kubenswrapper[4881]: E1211 09:55:32.006632 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:55:46 crc kubenswrapper[4881]: I1211 09:55:46.006610 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:55:46 crc kubenswrapper[4881]: E1211 09:55:46.007261 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:55:51 crc kubenswrapper[4881]: I1211 09:55:51.872689 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cdgxn"] Dec 11 09:55:51 crc kubenswrapper[4881]: E1211 09:55:51.873962 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5596eb3b-2966-4527-9742-7d8f19c934c0" 
containerName="extract-utilities" Dec 11 09:55:51 crc kubenswrapper[4881]: I1211 09:55:51.873979 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="5596eb3b-2966-4527-9742-7d8f19c934c0" containerName="extract-utilities" Dec 11 09:55:51 crc kubenswrapper[4881]: E1211 09:55:51.874000 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5596eb3b-2966-4527-9742-7d8f19c934c0" containerName="extract-content" Dec 11 09:55:51 crc kubenswrapper[4881]: I1211 09:55:51.874006 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="5596eb3b-2966-4527-9742-7d8f19c934c0" containerName="extract-content" Dec 11 09:55:51 crc kubenswrapper[4881]: E1211 09:55:51.874040 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5596eb3b-2966-4527-9742-7d8f19c934c0" containerName="registry-server" Dec 11 09:55:51 crc kubenswrapper[4881]: I1211 09:55:51.874048 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="5596eb3b-2966-4527-9742-7d8f19c934c0" containerName="registry-server" Dec 11 09:55:51 crc kubenswrapper[4881]: I1211 09:55:51.874268 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="5596eb3b-2966-4527-9742-7d8f19c934c0" containerName="registry-server" Dec 11 09:55:51 crc kubenswrapper[4881]: I1211 09:55:51.876397 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cdgxn" Dec 11 09:55:51 crc kubenswrapper[4881]: I1211 09:55:51.892041 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cdgxn"] Dec 11 09:55:52 crc kubenswrapper[4881]: I1211 09:55:52.031805 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hcsp\" (UniqueName: \"kubernetes.io/projected/a821ae49-e74d-43e6-8108-b8844f71613f-kube-api-access-2hcsp\") pod \"certified-operators-cdgxn\" (UID: \"a821ae49-e74d-43e6-8108-b8844f71613f\") " pod="openshift-marketplace/certified-operators-cdgxn" Dec 11 09:55:52 crc kubenswrapper[4881]: I1211 09:55:52.031903 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a821ae49-e74d-43e6-8108-b8844f71613f-catalog-content\") pod \"certified-operators-cdgxn\" (UID: \"a821ae49-e74d-43e6-8108-b8844f71613f\") " pod="openshift-marketplace/certified-operators-cdgxn" Dec 11 09:55:52 crc kubenswrapper[4881]: I1211 09:55:52.032159 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a821ae49-e74d-43e6-8108-b8844f71613f-utilities\") pod \"certified-operators-cdgxn\" (UID: \"a821ae49-e74d-43e6-8108-b8844f71613f\") " pod="openshift-marketplace/certified-operators-cdgxn" Dec 11 09:55:52 crc kubenswrapper[4881]: I1211 09:55:52.134587 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a821ae49-e74d-43e6-8108-b8844f71613f-utilities\") pod \"certified-operators-cdgxn\" (UID: \"a821ae49-e74d-43e6-8108-b8844f71613f\") " pod="openshift-marketplace/certified-operators-cdgxn" Dec 11 09:55:52 crc kubenswrapper[4881]: I1211 09:55:52.135076 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hcsp\" (UniqueName: \"kubernetes.io/projected/a821ae49-e74d-43e6-8108-b8844f71613f-kube-api-access-2hcsp\") pod \"certified-operators-cdgxn\" (UID: 
\"a821ae49-e74d-43e6-8108-b8844f71613f\") " pod="openshift-marketplace/certified-operators-cdgxn" Dec 11 09:55:52 crc kubenswrapper[4881]: I1211 09:55:52.135149 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a821ae49-e74d-43e6-8108-b8844f71613f-catalog-content\") pod \"certified-operators-cdgxn\" (UID: \"a821ae49-e74d-43e6-8108-b8844f71613f\") " pod="openshift-marketplace/certified-operators-cdgxn" Dec 11 09:55:52 crc kubenswrapper[4881]: I1211 09:55:52.136093 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a821ae49-e74d-43e6-8108-b8844f71613f-catalog-content\") pod \"certified-operators-cdgxn\" (UID: \"a821ae49-e74d-43e6-8108-b8844f71613f\") " pod="openshift-marketplace/certified-operators-cdgxn" Dec 11 09:55:52 crc kubenswrapper[4881]: I1211 09:55:52.136118 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a821ae49-e74d-43e6-8108-b8844f71613f-utilities\") pod \"certified-operators-cdgxn\" (UID: \"a821ae49-e74d-43e6-8108-b8844f71613f\") " pod="openshift-marketplace/certified-operators-cdgxn" Dec 11 09:55:52 crc kubenswrapper[4881]: I1211 09:55:52.155060 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hcsp\" (UniqueName: \"kubernetes.io/projected/a821ae49-e74d-43e6-8108-b8844f71613f-kube-api-access-2hcsp\") pod \"certified-operators-cdgxn\" (UID: \"a821ae49-e74d-43e6-8108-b8844f71613f\") " pod="openshift-marketplace/certified-operators-cdgxn" Dec 11 09:55:52 crc kubenswrapper[4881]: I1211 09:55:52.202718 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cdgxn" Dec 11 09:55:52 crc kubenswrapper[4881]: I1211 09:55:52.990128 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cdgxn"] Dec 11 09:55:53 crc kubenswrapper[4881]: I1211 09:55:53.149188 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cdgxn" event={"ID":"a821ae49-e74d-43e6-8108-b8844f71613f","Type":"ContainerStarted","Data":"1b94e60b7522087fcabb675daec7b5ed6e643d27214de672737aead3ee914832"} Dec 11 09:55:54 crc kubenswrapper[4881]: I1211 09:55:54.162243 4881 generic.go:334] "Generic (PLEG): container finished" podID="a821ae49-e74d-43e6-8108-b8844f71613f" containerID="e4b6484e342529487175a3602f5b5b77376670a504999a2c2df993647dcccf2a" exitCode=0 Dec 11 09:55:54 crc kubenswrapper[4881]: I1211 09:55:54.162599 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cdgxn" event={"ID":"a821ae49-e74d-43e6-8108-b8844f71613f","Type":"ContainerDied","Data":"e4b6484e342529487175a3602f5b5b77376670a504999a2c2df993647dcccf2a"} Dec 11 09:55:56 crc kubenswrapper[4881]: I1211 09:55:56.194270 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cdgxn" event={"ID":"a821ae49-e74d-43e6-8108-b8844f71613f","Type":"ContainerStarted","Data":"57ad29d4a3a8ddcce780a75f4ecd53cc7364a7934caf6a301ecbeeaaae14faa2"} Dec 11 09:55:56 crc kubenswrapper[4881]: E1211 09:55:56.362910 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda821ae49_e74d_43e6_8108_b8844f71613f.slice/crio-57ad29d4a3a8ddcce780a75f4ecd53cc7364a7934caf6a301ecbeeaaae14faa2.scope\": RecentStats: unable to find data in memory cache]" Dec 11 09:55:57 crc kubenswrapper[4881]: I1211 09:55:57.207029 4881 generic.go:334] "Generic (PLEG): container finished" podID="a821ae49-e74d-43e6-8108-b8844f71613f" containerID="57ad29d4a3a8ddcce780a75f4ecd53cc7364a7934caf6a301ecbeeaaae14faa2" exitCode=0 Dec 11 09:55:57 crc kubenswrapper[4881]: I1211 09:55:57.207090 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cdgxn" event={"ID":"a821ae49-e74d-43e6-8108-b8844f71613f","Type":"ContainerDied","Data":"57ad29d4a3a8ddcce780a75f4ecd53cc7364a7934caf6a301ecbeeaaae14faa2"} Dec 11 09:55:58 crc kubenswrapper[4881]: I1211 09:55:58.220419 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cdgxn" event={"ID":"a821ae49-e74d-43e6-8108-b8844f71613f","Type":"ContainerStarted","Data":"fc1505e1a3593fd8fee9c8301d185abe701d815b141f156c4213215cc0c67656"} Dec 11 09:55:58 crc kubenswrapper[4881]: I1211 09:55:58.253905 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-cdgxn" podStartSLOduration=3.668246576 podStartE2EDuration="7.25388503s" podCreationTimestamp="2025-12-11 09:55:51 +0000 UTC" firstStartedPulling="2025-12-11 09:55:54.16511516 +0000 UTC m=+6002.542483857" lastFinishedPulling="2025-12-11 09:55:57.750753614 +0000 UTC m=+6006.128122311" observedRunningTime="2025-12-11 09:55:58.247092783 +0000 UTC m=+6006.624461490" watchObservedRunningTime="2025-12-11 09:55:58.25388503 +0000 UTC m=+6006.631253727" Dec 11 09:56:00 crc kubenswrapper[4881]: I1211 09:56:00.005358 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:56:00 crc kubenswrapper[4881]: E1211 09:56:00.005914 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:56:02 crc kubenswrapper[4881]: I1211 09:56:02.203634 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cdgxn" Dec 11 09:56:02 crc kubenswrapper[4881]: I1211 09:56:02.204099 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cdgxn" Dec 11 09:56:02 crc kubenswrapper[4881]: I1211 09:56:02.255300 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cdgxn" Dec 11 09:56:12 crc kubenswrapper[4881]: I1211 09:56:12.256932 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-cdgxn" Dec 11 09:56:12 crc kubenswrapper[4881]: I1211 09:56:12.850986 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cdgxn"] Dec 11 09:56:12 crc kubenswrapper[4881]: I1211 09:56:12.851526 4881 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/certified-operators-cdgxn" podUID="a821ae49-e74d-43e6-8108-b8844f71613f" containerName="registry-server" containerID="cri-o://fc1505e1a3593fd8fee9c8301d185abe701d815b141f156c4213215cc0c67656" gracePeriod=2 Dec 11 09:56:13 crc kubenswrapper[4881]: I1211 09:56:13.402670 4881 generic.go:334] "Generic (PLEG): container finished" podID="a821ae49-e74d-43e6-8108-b8844f71613f" containerID="fc1505e1a3593fd8fee9c8301d185abe701d815b141f156c4213215cc0c67656" exitCode=0 Dec 11 09:56:13 crc kubenswrapper[4881]: I1211 09:56:13.402728 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cdgxn" event={"ID":"a821ae49-e74d-43e6-8108-b8844f71613f","Type":"ContainerDied","Data":"fc1505e1a3593fd8fee9c8301d185abe701d815b141f156c4213215cc0c67656"} Dec 11 09:56:13 crc kubenswrapper[4881]: I1211 09:56:13.403022 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cdgxn" event={"ID":"a821ae49-e74d-43e6-8108-b8844f71613f","Type":"ContainerDied","Data":"1b94e60b7522087fcabb675daec7b5ed6e643d27214de672737aead3ee914832"} Dec 11 09:56:13 crc kubenswrapper[4881]: I1211 09:56:13.403059 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b94e60b7522087fcabb675daec7b5ed6e643d27214de672737aead3ee914832" Dec 11 09:56:13 crc kubenswrapper[4881]: I1211 09:56:13.493803 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cdgxn" Dec 11 09:56:13 crc kubenswrapper[4881]: I1211 09:56:13.515153 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a821ae49-e74d-43e6-8108-b8844f71613f-catalog-content\") pod \"a821ae49-e74d-43e6-8108-b8844f71613f\" (UID: \"a821ae49-e74d-43e6-8108-b8844f71613f\") " Dec 11 09:56:13 crc kubenswrapper[4881]: I1211 09:56:13.594618 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a821ae49-e74d-43e6-8108-b8844f71613f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a821ae49-e74d-43e6-8108-b8844f71613f" (UID: "a821ae49-e74d-43e6-8108-b8844f71613f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:56:13 crc kubenswrapper[4881]: I1211 09:56:13.617930 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2hcsp\" (UniqueName: \"kubernetes.io/projected/a821ae49-e74d-43e6-8108-b8844f71613f-kube-api-access-2hcsp\") pod \"a821ae49-e74d-43e6-8108-b8844f71613f\" (UID: \"a821ae49-e74d-43e6-8108-b8844f71613f\") " Dec 11 09:56:13 crc kubenswrapper[4881]: I1211 09:56:13.618097 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a821ae49-e74d-43e6-8108-b8844f71613f-utilities\") pod \"a821ae49-e74d-43e6-8108-b8844f71613f\" (UID: \"a821ae49-e74d-43e6-8108-b8844f71613f\") " Dec 11 09:56:13 crc kubenswrapper[4881]: I1211 09:56:13.619095 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a821ae49-e74d-43e6-8108-b8844f71613f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 09:56:13 crc kubenswrapper[4881]: I1211 09:56:13.619535 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a821ae49-e74d-43e6-8108-b8844f71613f-utilities" (OuterVolumeSpecName: "utilities") pod "a821ae49-e74d-43e6-8108-b8844f71613f" (UID: "a821ae49-e74d-43e6-8108-b8844f71613f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:56:13 crc kubenswrapper[4881]: I1211 09:56:13.631731 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a821ae49-e74d-43e6-8108-b8844f71613f-kube-api-access-2hcsp" (OuterVolumeSpecName: "kube-api-access-2hcsp") pod "a821ae49-e74d-43e6-8108-b8844f71613f" (UID: "a821ae49-e74d-43e6-8108-b8844f71613f"). InnerVolumeSpecName "kube-api-access-2hcsp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:56:13 crc kubenswrapper[4881]: I1211 09:56:13.721850 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a821ae49-e74d-43e6-8108-b8844f71613f-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 09:56:13 crc kubenswrapper[4881]: I1211 09:56:13.721886 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2hcsp\" (UniqueName: \"kubernetes.io/projected/a821ae49-e74d-43e6-8108-b8844f71613f-kube-api-access-2hcsp\") on node \"crc\" DevicePath \"\"" Dec 11 09:56:14 crc kubenswrapper[4881]: I1211 09:56:14.006704 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:56:14 crc kubenswrapper[4881]: E1211 09:56:14.007639 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:56:14 crc kubenswrapper[4881]: I1211 09:56:14.412785 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cdgxn" Dec 11 09:56:14 crc kubenswrapper[4881]: I1211 09:56:14.449048 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cdgxn"] Dec 11 09:56:14 crc kubenswrapper[4881]: I1211 09:56:14.459171 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-cdgxn"] Dec 11 09:56:15 crc kubenswrapper[4881]: I1211 09:56:15.021111 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a821ae49-e74d-43e6-8108-b8844f71613f" path="/var/lib/kubelet/pods/a821ae49-e74d-43e6-8108-b8844f71613f/volumes" Dec 11 09:56:27 crc kubenswrapper[4881]: I1211 09:56:27.005793 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:56:27 crc kubenswrapper[4881]: E1211 09:56:27.006700 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:56:41 crc kubenswrapper[4881]: I1211 09:56:41.005874 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:56:41 crc kubenswrapper[4881]: E1211 09:56:41.007738 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:56:56 crc kubenswrapper[4881]: I1211 09:56:56.006100 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:56:56 crc kubenswrapper[4881]: E1211 09:56:56.006813 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:57:09 crc kubenswrapper[4881]: I1211 09:57:09.005739 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:57:09 crc kubenswrapper[4881]: E1211 09:57:09.006639 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:57:23 crc kubenswrapper[4881]: I1211 09:57:23.015531 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 
09:57:23 crc kubenswrapper[4881]: E1211 09:57:23.017423 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:57:36 crc kubenswrapper[4881]: I1211 09:57:36.007976 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:57:36 crc kubenswrapper[4881]: E1211 09:57:36.010170 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:57:47 crc kubenswrapper[4881]: I1211 09:57:47.005734 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:57:47 crc kubenswrapper[4881]: E1211 09:57:47.006704 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:58:01 crc kubenswrapper[4881]: I1211 09:58:01.006304 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:58:01 crc kubenswrapper[4881]: E1211 09:58:01.007111 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:58:13 crc kubenswrapper[4881]: I1211 09:58:13.014979 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:58:13 crc kubenswrapper[4881]: E1211 09:58:13.016111 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:58:23 crc kubenswrapper[4881]: I1211 09:58:23.876880 4881 generic.go:334] "Generic (PLEG): container finished" podID="44483fe0-748e-4e0e-9591-f5c14c4cd3f8" containerID="0d43a46784be8743eb464e585024988ef5557f3ae744b53e88854c3e5939f70f" exitCode=0 Dec 11 09:58:23 crc kubenswrapper[4881]: I1211 09:58:23.876964 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
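The same RemoveContainer/CrashLoopBackOff pair repeats above roughly every sync period because the container's restart backoff has already hit its ceiling: the error reports back-off 5m0s, and kubelet keeps re-logging it until that window expires (the daemon finally restarts at 09:59:06 further down). A self-contained Go sketch of that capped doubling, assuming the commonly cited kubelet defaults of a 10s initial delay and a 5m cap; the real logic lives in kubelet's runtime manager and client-go's flowcontrol.Backoff, not in this sketch:

package main

import (
	"fmt"
	"time"
)

// Capped exponential backoff: each failed restart doubles the wait
// until it saturates at the maximum, which is what the repeated
// "back-off 5m0s" above reports. 10s and 5m are assumed defaults.
func main() {
	const (
		initial = 10 * time.Second
		ceiling = 5 * time.Minute
	)
	delay := initial
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("restart %d: wait %v\n", attempt, delay)
		if delay = delay * 2; delay > ceiling {
			delay = ceiling // saturates at 5m0s after a handful of failures
		}
	}
}

Once saturated, every sync attempt inside the 5-minute window produces the identical error line, which is why the log shows the same message at ~13-second intervals rather than new restart attempts.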
pod="openstack/tempest-tests-tempest" event={"ID":"44483fe0-748e-4e0e-9591-f5c14c4cd3f8","Type":"ContainerDied","Data":"0d43a46784be8743eb464e585024988ef5557f3ae744b53e88854c3e5939f70f"} Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.006658 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:58:25 crc kubenswrapper[4881]: E1211 09:58:25.007842 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.377473 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.527754 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-openstack-config-secret\") pod \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.528150 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-config-data\") pod \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.528580 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-ca-certs\") pod \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.529119 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vwstd\" (UniqueName: \"kubernetes.io/projected/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-kube-api-access-vwstd\") pod \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.529461 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-config-data" (OuterVolumeSpecName: "config-data") pod "44483fe0-748e-4e0e-9591-f5c14c4cd3f8" (UID: "44483fe0-748e-4e0e-9591-f5c14c4cd3f8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.529703 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-test-operator-ephemeral-workdir\") pod \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.529877 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.532279 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "44483fe0-748e-4e0e-9591-f5c14c4cd3f8" (UID: "44483fe0-748e-4e0e-9591-f5c14c4cd3f8"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.534530 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-kube-api-access-vwstd" (OuterVolumeSpecName: "kube-api-access-vwstd") pod "44483fe0-748e-4e0e-9591-f5c14c4cd3f8" (UID: "44483fe0-748e-4e0e-9591-f5c14c4cd3f8"). InnerVolumeSpecName "kube-api-access-vwstd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.535682 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "test-operator-logs") pod "44483fe0-748e-4e0e-9591-f5c14c4cd3f8" (UID: "44483fe0-748e-4e0e-9591-f5c14c4cd3f8"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.538076 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "44483fe0-748e-4e0e-9591-f5c14c4cd3f8" (UID: "44483fe0-748e-4e0e-9591-f5c14c4cd3f8"). InnerVolumeSpecName "test-operator-ephemeral-workdir". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.538232 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-test-operator-ephemeral-temporary\") pod \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.538476 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-ssh-key\") pod \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.538682 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-openstack-config\") pod \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\" (UID: \"44483fe0-748e-4e0e-9591-f5c14c4cd3f8\") " Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.539927 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.540254 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vwstd\" (UniqueName: \"kubernetes.io/projected/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-kube-api-access-vwstd\") on node \"crc\" DevicePath \"\"" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.540328 4881 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.543693 4881 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.543807 4881 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.567803 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "44483fe0-748e-4e0e-9591-f5c14c4cd3f8" (UID: "44483fe0-748e-4e0e-9591-f5c14c4cd3f8"). InnerVolumeSpecName "ca-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.572890 4881 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.576240 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "44483fe0-748e-4e0e-9591-f5c14c4cd3f8" (UID: "44483fe0-748e-4e0e-9591-f5c14c4cd3f8"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.590122 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "44483fe0-748e-4e0e-9591-f5c14c4cd3f8" (UID: "44483fe0-748e-4e0e-9591-f5c14c4cd3f8"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.599318 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "44483fe0-748e-4e0e-9591-f5c14c4cd3f8" (UID: "44483fe0-748e-4e0e-9591-f5c14c4cd3f8"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.646621 4881 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.646989 4881 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.647005 4881 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-openstack-config\") on node \"crc\" DevicePath \"\"" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.647019 4881 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.647031 4881 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/44483fe0-748e-4e0e-9591-f5c14c4cd3f8-ca-certs\") on node \"crc\" DevicePath \"\"" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.898323 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"44483fe0-748e-4e0e-9591-f5c14c4cd3f8","Type":"ContainerDied","Data":"4d621f27cca3dccf655cd1c0f6fea8d3326ae0bdd62c14a4a6128cc5225d5257"} Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.898383 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d621f27cca3dccf655cd1c0f6fea8d3326ae0bdd62c14a4a6128cc5225d5257" Dec 11 09:58:25 crc kubenswrapper[4881]: I1211 09:58:25.898438 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Dec 11 09:58:33 crc kubenswrapper[4881]: I1211 09:58:33.380857 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Dec 11 09:58:33 crc kubenswrapper[4881]: E1211 09:58:33.381839 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a821ae49-e74d-43e6-8108-b8844f71613f" containerName="extract-content" Dec 11 09:58:33 crc kubenswrapper[4881]: I1211 09:58:33.381879 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="a821ae49-e74d-43e6-8108-b8844f71613f" containerName="extract-content" Dec 11 09:58:33 crc kubenswrapper[4881]: E1211 09:58:33.381898 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a821ae49-e74d-43e6-8108-b8844f71613f" containerName="extract-utilities" Dec 11 09:58:33 crc kubenswrapper[4881]: I1211 09:58:33.381906 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="a821ae49-e74d-43e6-8108-b8844f71613f" containerName="extract-utilities" Dec 11 09:58:33 crc kubenswrapper[4881]: E1211 09:58:33.381915 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a821ae49-e74d-43e6-8108-b8844f71613f" containerName="registry-server" Dec 11 09:58:33 crc kubenswrapper[4881]: I1211 09:58:33.381921 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="a821ae49-e74d-43e6-8108-b8844f71613f" containerName="registry-server" Dec 11 09:58:33 crc kubenswrapper[4881]: E1211 09:58:33.381948 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44483fe0-748e-4e0e-9591-f5c14c4cd3f8" containerName="tempest-tests-tempest-tests-runner" Dec 11 09:58:33 crc kubenswrapper[4881]: I1211 09:58:33.381954 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="44483fe0-748e-4e0e-9591-f5c14c4cd3f8" containerName="tempest-tests-tempest-tests-runner" Dec 11 09:58:33 crc kubenswrapper[4881]: I1211 09:58:33.382169 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="44483fe0-748e-4e0e-9591-f5c14c4cd3f8" containerName="tempest-tests-tempest-tests-runner" Dec 11 09:58:33 crc kubenswrapper[4881]: I1211 09:58:33.382192 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="a821ae49-e74d-43e6-8108-b8844f71613f" containerName="registry-server" Dec 11 09:58:33 crc kubenswrapper[4881]: I1211 09:58:33.383006 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 09:58:33 crc kubenswrapper[4881]: I1211 09:58:33.386836 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-2j44r" Dec 11 09:58:33 crc kubenswrapper[4881]: I1211 09:58:33.403270 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Dec 11 09:58:33 crc kubenswrapper[4881]: I1211 09:58:33.563471 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7bxk\" (UniqueName: \"kubernetes.io/projected/443704cb-4132-4086-9c08-edc325a2bbc5-kube-api-access-z7bxk\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"443704cb-4132-4086-9c08-edc325a2bbc5\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 09:58:33 crc kubenswrapper[4881]: I1211 09:58:33.563575 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"443704cb-4132-4086-9c08-edc325a2bbc5\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 09:58:33 crc kubenswrapper[4881]: I1211 09:58:33.666184 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7bxk\" (UniqueName: \"kubernetes.io/projected/443704cb-4132-4086-9c08-edc325a2bbc5-kube-api-access-z7bxk\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"443704cb-4132-4086-9c08-edc325a2bbc5\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 09:58:33 crc kubenswrapper[4881]: I1211 09:58:33.666270 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"443704cb-4132-4086-9c08-edc325a2bbc5\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 09:58:33 crc kubenswrapper[4881]: I1211 09:58:33.667204 4881 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"443704cb-4132-4086-9c08-edc325a2bbc5\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 09:58:33 crc kubenswrapper[4881]: I1211 09:58:33.692112 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7bxk\" (UniqueName: \"kubernetes.io/projected/443704cb-4132-4086-9c08-edc325a2bbc5-kube-api-access-z7bxk\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"443704cb-4132-4086-9c08-edc325a2bbc5\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 09:58:33 crc kubenswrapper[4881]: I1211 09:58:33.730611 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"443704cb-4132-4086-9c08-edc325a2bbc5\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 09:58:34 crc 
kubenswrapper[4881]: I1211 09:58:34.019779 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 09:58:34 crc kubenswrapper[4881]: I1211 09:58:34.502146 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Dec 11 09:58:35 crc kubenswrapper[4881]: I1211 09:58:35.001987 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"443704cb-4132-4086-9c08-edc325a2bbc5","Type":"ContainerStarted","Data":"e3fbe31c2f2772be28537547bcf545978ca47e23ae6a7a4ffde7973e5610d729"} Dec 11 09:58:36 crc kubenswrapper[4881]: I1211 09:58:36.013764 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"443704cb-4132-4086-9c08-edc325a2bbc5","Type":"ContainerStarted","Data":"eea64523358f62277abab217605a23071b80c65930258162ba9f47197abe7c23"} Dec 11 09:58:37 crc kubenswrapper[4881]: I1211 09:58:37.005700 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:58:37 crc kubenswrapper[4881]: E1211 09:58:37.006436 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:58:52 crc kubenswrapper[4881]: I1211 09:58:52.005998 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:58:52 crc kubenswrapper[4881]: E1211 09:58:52.006934 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 09:59:06 crc kubenswrapper[4881]: I1211 09:59:06.005084 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c" Dec 11 09:59:07 crc kubenswrapper[4881]: I1211 09:59:07.396043 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"82dddf54ebc8157bc717b15196b143b5bf6d65e5f3b7b89542d401dd508acd69"} Dec 11 09:59:07 crc kubenswrapper[4881]: I1211 09:59:07.464974 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=33.383542032 podStartE2EDuration="34.464953906s" podCreationTimestamp="2025-12-11 09:58:33 +0000 UTC" firstStartedPulling="2025-12-11 09:58:34.512369322 +0000 UTC m=+6162.889738019" lastFinishedPulling="2025-12-11 09:58:35.593781156 +0000 UTC m=+6163.971149893" observedRunningTime="2025-12-11 09:58:36.030652627 +0000 UTC m=+6164.408021344" watchObservedRunningTime="2025-12-11 09:59:07.464953906 +0000 UTC 
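Every wall-clock timestamp in these entries carries an m=+... suffix, e.g. 2025-12-11 09:59:07.464953906 +0000 UTC m=+6195.842322603. That is Go's monotonic clock reading: time.Now() captures both a wall reading and a monotonic reading (seconds since roughly process start, so m=+6195 here suggests the kubelet had been up about 1h43m), and time.Time's default formatting appends it. A small illustration of the documented behavior; the printed values are of course machine-dependent:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Now() // carries a monotonic clock reading
	fmt.Println(t)  // prints "... +0000 UTC m=+0.000012345"-style output

	// Round(0) strips the monotonic reading; the m=+ suffix disappears.
	fmt.Println(t.Round(0))

	// Subtracting two monotonic-bearing times uses the monotonic clock,
	// so the result is immune to wall-clock steps (e.g. NTP adjustments).
	fmt.Println(time.Since(t))
}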
m=+6195.842322603" Dec 11 09:59:07 crc kubenswrapper[4881]: I1211 09:59:07.605575 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6ndjj/must-gather-q79pd"] Dec 11 09:59:07 crc kubenswrapper[4881]: I1211 09:59:07.608202 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6ndjj/must-gather-q79pd" Dec 11 09:59:07 crc kubenswrapper[4881]: I1211 09:59:07.613940 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-6ndjj"/"openshift-service-ca.crt" Dec 11 09:59:07 crc kubenswrapper[4881]: I1211 09:59:07.614279 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-6ndjj"/"default-dockercfg-t6pcl" Dec 11 09:59:07 crc kubenswrapper[4881]: I1211 09:59:07.614428 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-6ndjj"/"kube-root-ca.crt" Dec 11 09:59:07 crc kubenswrapper[4881]: I1211 09:59:07.636358 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-6ndjj/must-gather-q79pd"] Dec 11 09:59:07 crc kubenswrapper[4881]: I1211 09:59:07.724656 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhvfr\" (UniqueName: \"kubernetes.io/projected/8c355bf0-389d-4d9a-a9b3-e4b2856b0bec-kube-api-access-vhvfr\") pod \"must-gather-q79pd\" (UID: \"8c355bf0-389d-4d9a-a9b3-e4b2856b0bec\") " pod="openshift-must-gather-6ndjj/must-gather-q79pd" Dec 11 09:59:07 crc kubenswrapper[4881]: I1211 09:59:07.724860 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/8c355bf0-389d-4d9a-a9b3-e4b2856b0bec-must-gather-output\") pod \"must-gather-q79pd\" (UID: \"8c355bf0-389d-4d9a-a9b3-e4b2856b0bec\") " pod="openshift-must-gather-6ndjj/must-gather-q79pd" Dec 11 09:59:07 crc kubenswrapper[4881]: I1211 09:59:07.827740 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhvfr\" (UniqueName: \"kubernetes.io/projected/8c355bf0-389d-4d9a-a9b3-e4b2856b0bec-kube-api-access-vhvfr\") pod \"must-gather-q79pd\" (UID: \"8c355bf0-389d-4d9a-a9b3-e4b2856b0bec\") " pod="openshift-must-gather-6ndjj/must-gather-q79pd" Dec 11 09:59:07 crc kubenswrapper[4881]: I1211 09:59:07.827897 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/8c355bf0-389d-4d9a-a9b3-e4b2856b0bec-must-gather-output\") pod \"must-gather-q79pd\" (UID: \"8c355bf0-389d-4d9a-a9b3-e4b2856b0bec\") " pod="openshift-must-gather-6ndjj/must-gather-q79pd" Dec 11 09:59:07 crc kubenswrapper[4881]: I1211 09:59:07.828447 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/8c355bf0-389d-4d9a-a9b3-e4b2856b0bec-must-gather-output\") pod \"must-gather-q79pd\" (UID: \"8c355bf0-389d-4d9a-a9b3-e4b2856b0bec\") " pod="openshift-must-gather-6ndjj/must-gather-q79pd" Dec 11 09:59:07 crc kubenswrapper[4881]: I1211 09:59:07.853288 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhvfr\" (UniqueName: \"kubernetes.io/projected/8c355bf0-389d-4d9a-a9b3-e4b2856b0bec-kube-api-access-vhvfr\") pod \"must-gather-q79pd\" (UID: \"8c355bf0-389d-4d9a-a9b3-e4b2856b0bec\") " pod="openshift-must-gather-6ndjj/must-gather-q79pd" Dec 11 09:59:07 crc kubenswrapper[4881]: I1211 
09:59:07.936075 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6ndjj/must-gather-q79pd" Dec 11 09:59:08 crc kubenswrapper[4881]: W1211 09:59:08.452728 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c355bf0_389d_4d9a_a9b3_e4b2856b0bec.slice/crio-dbcb6d7949b58b5c32eb8ff3924da3e33b76876dc002f2d3b5ee72b30dc48641 WatchSource:0}: Error finding container dbcb6d7949b58b5c32eb8ff3924da3e33b76876dc002f2d3b5ee72b30dc48641: Status 404 returned error can't find the container with id dbcb6d7949b58b5c32eb8ff3924da3e33b76876dc002f2d3b5ee72b30dc48641 Dec 11 09:59:08 crc kubenswrapper[4881]: I1211 09:59:08.455350 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 09:59:08 crc kubenswrapper[4881]: I1211 09:59:08.462624 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-6ndjj/must-gather-q79pd"] Dec 11 09:59:09 crc kubenswrapper[4881]: I1211 09:59:09.418249 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6ndjj/must-gather-q79pd" event={"ID":"8c355bf0-389d-4d9a-a9b3-e4b2856b0bec","Type":"ContainerStarted","Data":"dbcb6d7949b58b5c32eb8ff3924da3e33b76876dc002f2d3b5ee72b30dc48641"} Dec 11 09:59:19 crc kubenswrapper[4881]: I1211 09:59:19.531022 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6ndjj/must-gather-q79pd" event={"ID":"8c355bf0-389d-4d9a-a9b3-e4b2856b0bec","Type":"ContainerStarted","Data":"c195c7f5efedded7ed78f8b661299b794546b44e285d8eaf71b56f319401168b"} Dec 11 09:59:19 crc kubenswrapper[4881]: I1211 09:59:19.532412 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6ndjj/must-gather-q79pd" event={"ID":"8c355bf0-389d-4d9a-a9b3-e4b2856b0bec","Type":"ContainerStarted","Data":"98b44f760930dd1abfd374422c1eda877139256e8111b6b1442ce6dcb08117ae"} Dec 11 09:59:19 crc kubenswrapper[4881]: I1211 09:59:19.557700 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-6ndjj/must-gather-q79pd" podStartSLOduration=2.459279143 podStartE2EDuration="12.557675389s" podCreationTimestamp="2025-12-11 09:59:07 +0000 UTC" firstStartedPulling="2025-12-11 09:59:08.455139334 +0000 UTC m=+6196.832508031" lastFinishedPulling="2025-12-11 09:59:18.55353557 +0000 UTC m=+6206.930904277" observedRunningTime="2025-12-11 09:59:19.546110186 +0000 UTC m=+6207.923478883" watchObservedRunningTime="2025-12-11 09:59:19.557675389 +0000 UTC m=+6207.935044096" Dec 11 09:59:24 crc kubenswrapper[4881]: I1211 09:59:24.469582 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6ndjj/crc-debug-h8w9r"] Dec 11 09:59:24 crc kubenswrapper[4881]: I1211 09:59:24.471580 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6ndjj/crc-debug-h8w9r" Dec 11 09:59:24 crc kubenswrapper[4881]: I1211 09:59:24.566983 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6cd87b4f-9808-4c98-828d-ce4fdb1d1960-host\") pod \"crc-debug-h8w9r\" (UID: \"6cd87b4f-9808-4c98-828d-ce4fdb1d1960\") " pod="openshift-must-gather-6ndjj/crc-debug-h8w9r" Dec 11 09:59:24 crc kubenswrapper[4881]: I1211 09:59:24.567194 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gp679\" (UniqueName: \"kubernetes.io/projected/6cd87b4f-9808-4c98-828d-ce4fdb1d1960-kube-api-access-gp679\") pod \"crc-debug-h8w9r\" (UID: \"6cd87b4f-9808-4c98-828d-ce4fdb1d1960\") " pod="openshift-must-gather-6ndjj/crc-debug-h8w9r" Dec 11 09:59:24 crc kubenswrapper[4881]: I1211 09:59:24.670851 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6cd87b4f-9808-4c98-828d-ce4fdb1d1960-host\") pod \"crc-debug-h8w9r\" (UID: \"6cd87b4f-9808-4c98-828d-ce4fdb1d1960\") " pod="openshift-must-gather-6ndjj/crc-debug-h8w9r" Dec 11 09:59:24 crc kubenswrapper[4881]: I1211 09:59:24.671077 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gp679\" (UniqueName: \"kubernetes.io/projected/6cd87b4f-9808-4c98-828d-ce4fdb1d1960-kube-api-access-gp679\") pod \"crc-debug-h8w9r\" (UID: \"6cd87b4f-9808-4c98-828d-ce4fdb1d1960\") " pod="openshift-must-gather-6ndjj/crc-debug-h8w9r" Dec 11 09:59:24 crc kubenswrapper[4881]: I1211 09:59:24.671425 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6cd87b4f-9808-4c98-828d-ce4fdb1d1960-host\") pod \"crc-debug-h8w9r\" (UID: \"6cd87b4f-9808-4c98-828d-ce4fdb1d1960\") " pod="openshift-must-gather-6ndjj/crc-debug-h8w9r" Dec 11 09:59:24 crc kubenswrapper[4881]: I1211 09:59:24.701244 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gp679\" (UniqueName: \"kubernetes.io/projected/6cd87b4f-9808-4c98-828d-ce4fdb1d1960-kube-api-access-gp679\") pod \"crc-debug-h8w9r\" (UID: \"6cd87b4f-9808-4c98-828d-ce4fdb1d1960\") " pod="openshift-must-gather-6ndjj/crc-debug-h8w9r" Dec 11 09:59:24 crc kubenswrapper[4881]: I1211 09:59:24.790551 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6ndjj/crc-debug-h8w9r" Dec 11 09:59:25 crc kubenswrapper[4881]: I1211 09:59:25.601615 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6ndjj/crc-debug-h8w9r" event={"ID":"6cd87b4f-9808-4c98-828d-ce4fdb1d1960","Type":"ContainerStarted","Data":"1a57fd6f62b2369aac0afb5703b15327e7113f78b2b560228eda07c24a67db05"} Dec 11 09:59:26 crc kubenswrapper[4881]: E1211 09:59:26.824318 4881 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.20:54036->38.102.83.20:39683: write tcp 38.102.83.20:54036->38.102.83.20:39683: write: broken pipe Dec 11 09:59:39 crc kubenswrapper[4881]: E1211 09:59:39.496758 4881 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6ab858aed98e4fe57e6b144da8e90ad5d6698bb4cc5521206f5c05809f0f9296" Dec 11 09:59:39 crc kubenswrapper[4881]: E1211 09:59:39.498073 4881 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:container-00,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6ab858aed98e4fe57e6b144da8e90ad5d6698bb4cc5521206f5c05809f0f9296,Command:[chroot /host bash -c echo 'TOOLBOX_NAME=toolbox-osp' > /root/.toolboxrc ; rm -rf \"/var/tmp/sos-osp\" && mkdir -p \"/var/tmp/sos-osp\" && sudo podman rm --force toolbox-osp; sudo --preserve-env podman pull --authfile /var/lib/kubelet/config.json registry.redhat.io/rhel9/support-tools && toolbox sos report --batch --all-logs --only-plugins block,cifs,crio,devicemapper,devices,firewall_tables,firewalld,iscsi,lvm2,memory,multipath,nfs,nis,nvme,podman,process,processor,selinux,scsi,udev,logs,crypto --tmp-dir=\"/var/tmp/sos-osp\" && if [[ \"$(ls /var/log/pods/*/{*.log.*,*/*.log.*} 2>/dev/null)\" != '' ]]; then tar --ignore-failed-read --warning=no-file-changed -cJf \"/var/tmp/sos-osp/podlogs.tar.xz\" --transform 's,^,podlogs/,' /var/log/pods/*/{*.log.*,*/*.log.*} || true; fi],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:TMOUT,Value:900,ValueFrom:nil,},EnvVar{Name:HOST,Value:/host,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:host,ReadOnly:false,MountPath:/host,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gp679,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod crc-debug-h8w9r_openshift-must-gather-6ndjj(6cd87b4f-9808-4c98-828d-ce4fdb1d1960): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 09:59:39 crc kubenswrapper[4881]: E1211 09:59:39.499759 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"container-00\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openshift-must-gather-6ndjj/crc-debug-h8w9r" podUID="6cd87b4f-9808-4c98-828d-ce4fdb1d1960" Dec 11 09:59:39 crc kubenswrapper[4881]: E1211 09:59:39.816772 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"container-00\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6ab858aed98e4fe57e6b144da8e90ad5d6698bb4cc5521206f5c05809f0f9296\\\"\"" pod="openshift-must-gather-6ndjj/crc-debug-h8w9r" podUID="6cd87b4f-9808-4c98-828d-ce4fdb1d1960" Dec 11 09:59:55 crc kubenswrapper[4881]: I1211 09:59:55.020011 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6ndjj/crc-debug-h8w9r" event={"ID":"6cd87b4f-9808-4c98-828d-ce4fdb1d1960","Type":"ContainerStarted","Data":"e245d9b6b5ced786e2aa27b689797e9c33be0f7dcadea5439aad8bd0215f3d62"} Dec 11 09:59:55 crc kubenswrapper[4881]: I1211 09:59:55.044573 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-6ndjj/crc-debug-h8w9r" podStartSLOduration=1.150364023 podStartE2EDuration="31.044554729s" podCreationTimestamp="2025-12-11 09:59:24 +0000 UTC" firstStartedPulling="2025-12-11 09:59:24.840061095 +0000 UTC m=+6213.217429792" lastFinishedPulling="2025-12-11 09:59:54.734251811 +0000 UTC m=+6243.111620498" observedRunningTime="2025-12-11 09:59:55.038510421 +0000 UTC m=+6243.415879118" watchObservedRunningTime="2025-12-11 09:59:55.044554729 +0000 UTC m=+6243.421923426" Dec 11 10:00:00 crc kubenswrapper[4881]: I1211 10:00:00.181297 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv"] Dec 11 10:00:00 crc kubenswrapper[4881]: I1211 10:00:00.184749 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv" Dec 11 10:00:00 crc kubenswrapper[4881]: I1211 10:00:00.192283 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 11 10:00:00 crc kubenswrapper[4881]: I1211 10:00:00.192434 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 11 10:00:00 crc kubenswrapper[4881]: I1211 10:00:00.204607 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv"] Dec 11 10:00:00 crc kubenswrapper[4881]: I1211 10:00:00.340451 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbgd4\" (UniqueName: \"kubernetes.io/projected/ec312bbf-dc76-4090-b595-1c0fe22592ee-kube-api-access-dbgd4\") pod \"collect-profiles-29424120-nkspv\" (UID: \"ec312bbf-dc76-4090-b595-1c0fe22592ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv" Dec 11 10:00:00 crc kubenswrapper[4881]: I1211 10:00:00.340828 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec312bbf-dc76-4090-b595-1c0fe22592ee-secret-volume\") pod \"collect-profiles-29424120-nkspv\" (UID: \"ec312bbf-dc76-4090-b595-1c0fe22592ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv" Dec 11 10:00:00 crc kubenswrapper[4881]: I1211 10:00:00.340941 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec312bbf-dc76-4090-b595-1c0fe22592ee-config-volume\") pod \"collect-profiles-29424120-nkspv\" (UID: \"ec312bbf-dc76-4090-b595-1c0fe22592ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv" Dec 11 10:00:00 crc kubenswrapper[4881]: I1211 10:00:00.444506 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec312bbf-dc76-4090-b595-1c0fe22592ee-secret-volume\") pod \"collect-profiles-29424120-nkspv\" (UID: \"ec312bbf-dc76-4090-b595-1c0fe22592ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv" Dec 11 10:00:00 crc kubenswrapper[4881]: I1211 10:00:00.444592 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec312bbf-dc76-4090-b595-1c0fe22592ee-config-volume\") pod \"collect-profiles-29424120-nkspv\" (UID: \"ec312bbf-dc76-4090-b595-1c0fe22592ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv" Dec 11 10:00:00 crc kubenswrapper[4881]: I1211 10:00:00.444780 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbgd4\" (UniqueName: \"kubernetes.io/projected/ec312bbf-dc76-4090-b595-1c0fe22592ee-kube-api-access-dbgd4\") pod \"collect-profiles-29424120-nkspv\" (UID: \"ec312bbf-dc76-4090-b595-1c0fe22592ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv" Dec 11 10:00:00 crc kubenswrapper[4881]: I1211 10:00:00.445423 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec312bbf-dc76-4090-b595-1c0fe22592ee-config-volume\") pod 
\"collect-profiles-29424120-nkspv\" (UID: \"ec312bbf-dc76-4090-b595-1c0fe22592ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv" Dec 11 10:00:00 crc kubenswrapper[4881]: I1211 10:00:00.460533 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec312bbf-dc76-4090-b595-1c0fe22592ee-secret-volume\") pod \"collect-profiles-29424120-nkspv\" (UID: \"ec312bbf-dc76-4090-b595-1c0fe22592ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv" Dec 11 10:00:00 crc kubenswrapper[4881]: I1211 10:00:00.463050 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbgd4\" (UniqueName: \"kubernetes.io/projected/ec312bbf-dc76-4090-b595-1c0fe22592ee-kube-api-access-dbgd4\") pod \"collect-profiles-29424120-nkspv\" (UID: \"ec312bbf-dc76-4090-b595-1c0fe22592ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv" Dec 11 10:00:00 crc kubenswrapper[4881]: I1211 10:00:00.523323 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv" Dec 11 10:00:01 crc kubenswrapper[4881]: I1211 10:00:01.652195 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv"] Dec 11 10:00:02 crc kubenswrapper[4881]: I1211 10:00:02.105850 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv" event={"ID":"ec312bbf-dc76-4090-b595-1c0fe22592ee","Type":"ContainerStarted","Data":"3def8f9e74df27ca81203799f57647049608ed7229cb076458c0c2fba94194c7"} Dec 11 10:00:02 crc kubenswrapper[4881]: I1211 10:00:02.106175 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv" event={"ID":"ec312bbf-dc76-4090-b595-1c0fe22592ee","Type":"ContainerStarted","Data":"85f36c7d80802b73a67fc27c77325e3041633020eb06312e141d5376e36971f9"} Dec 11 10:00:03 crc kubenswrapper[4881]: I1211 10:00:03.202960 4881 generic.go:334] "Generic (PLEG): container finished" podID="ec312bbf-dc76-4090-b595-1c0fe22592ee" containerID="3def8f9e74df27ca81203799f57647049608ed7229cb076458c0c2fba94194c7" exitCode=0 Dec 11 10:00:03 crc kubenswrapper[4881]: I1211 10:00:03.203607 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv" event={"ID":"ec312bbf-dc76-4090-b595-1c0fe22592ee","Type":"ContainerDied","Data":"3def8f9e74df27ca81203799f57647049608ed7229cb076458c0c2fba94194c7"} Dec 11 10:00:07 crc kubenswrapper[4881]: I1211 10:00:07.362970 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv" Dec 11 10:00:07 crc kubenswrapper[4881]: I1211 10:00:07.552590 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbgd4\" (UniqueName: \"kubernetes.io/projected/ec312bbf-dc76-4090-b595-1c0fe22592ee-kube-api-access-dbgd4\") pod \"ec312bbf-dc76-4090-b595-1c0fe22592ee\" (UID: \"ec312bbf-dc76-4090-b595-1c0fe22592ee\") " Dec 11 10:00:07 crc kubenswrapper[4881]: I1211 10:00:07.552762 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec312bbf-dc76-4090-b595-1c0fe22592ee-config-volume\") pod \"ec312bbf-dc76-4090-b595-1c0fe22592ee\" (UID: \"ec312bbf-dc76-4090-b595-1c0fe22592ee\") " Dec 11 10:00:07 crc kubenswrapper[4881]: I1211 10:00:07.552842 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec312bbf-dc76-4090-b595-1c0fe22592ee-secret-volume\") pod \"ec312bbf-dc76-4090-b595-1c0fe22592ee\" (UID: \"ec312bbf-dc76-4090-b595-1c0fe22592ee\") " Dec 11 10:00:07 crc kubenswrapper[4881]: I1211 10:00:07.553604 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec312bbf-dc76-4090-b595-1c0fe22592ee-config-volume" (OuterVolumeSpecName: "config-volume") pod "ec312bbf-dc76-4090-b595-1c0fe22592ee" (UID: "ec312bbf-dc76-4090-b595-1c0fe22592ee"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:00:07 crc kubenswrapper[4881]: I1211 10:00:07.571526 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec312bbf-dc76-4090-b595-1c0fe22592ee-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ec312bbf-dc76-4090-b595-1c0fe22592ee" (UID: "ec312bbf-dc76-4090-b595-1c0fe22592ee"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:00:07 crc kubenswrapper[4881]: I1211 10:00:07.571643 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec312bbf-dc76-4090-b595-1c0fe22592ee-kube-api-access-dbgd4" (OuterVolumeSpecName: "kube-api-access-dbgd4") pod "ec312bbf-dc76-4090-b595-1c0fe22592ee" (UID: "ec312bbf-dc76-4090-b595-1c0fe22592ee"). InnerVolumeSpecName "kube-api-access-dbgd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:00:07 crc kubenswrapper[4881]: I1211 10:00:07.656438 4881 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec312bbf-dc76-4090-b595-1c0fe22592ee-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 11 10:00:07 crc kubenswrapper[4881]: I1211 10:00:07.656478 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbgd4\" (UniqueName: \"kubernetes.io/projected/ec312bbf-dc76-4090-b595-1c0fe22592ee-kube-api-access-dbgd4\") on node \"crc\" DevicePath \"\"" Dec 11 10:00:07 crc kubenswrapper[4881]: I1211 10:00:07.656488 4881 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec312bbf-dc76-4090-b595-1c0fe22592ee-config-volume\") on node \"crc\" DevicePath \"\"" Dec 11 10:00:08 crc kubenswrapper[4881]: I1211 10:00:08.257780 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv" event={"ID":"ec312bbf-dc76-4090-b595-1c0fe22592ee","Type":"ContainerDied","Data":"85f36c7d80802b73a67fc27c77325e3041633020eb06312e141d5376e36971f9"} Dec 11 10:00:08 crc kubenswrapper[4881]: I1211 10:00:08.257818 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="85f36c7d80802b73a67fc27c77325e3041633020eb06312e141d5376e36971f9" Dec 11 10:00:08 crc kubenswrapper[4881]: I1211 10:00:08.257876 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424120-nkspv" Dec 11 10:00:08 crc kubenswrapper[4881]: I1211 10:00:08.584388 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6"] Dec 11 10:00:08 crc kubenswrapper[4881]: I1211 10:00:08.595168 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424075-ntxg6"] Dec 11 10:00:09 crc kubenswrapper[4881]: I1211 10:00:09.020580 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4caeb70a-1169-4614-861a-5b7ec55986cc" path="/var/lib/kubelet/pods/4caeb70a-1169-4614-861a-5b7ec55986cc/volumes" Dec 11 10:00:13 crc kubenswrapper[4881]: I1211 10:00:13.411902 4881 scope.go:117] "RemoveContainer" containerID="8d743313d6835e3a6a7e88318cf902dadc4a8e6d7f290d31a14d0bdd3ee49f55" Dec 11 10:00:13 crc kubenswrapper[4881]: I1211 10:00:13.445518 4881 scope.go:117] "RemoveContainer" containerID="20c7b15e611b6e3704bf47f268bfa53889bc6096784ee217e63eed7c3f238452" Dec 11 10:00:54 crc kubenswrapper[4881]: I1211 10:00:54.863445 4881 generic.go:334] "Generic (PLEG): container finished" podID="6cd87b4f-9808-4c98-828d-ce4fdb1d1960" containerID="e245d9b6b5ced786e2aa27b689797e9c33be0f7dcadea5439aad8bd0215f3d62" exitCode=0 Dec 11 10:00:54 crc kubenswrapper[4881]: I1211 10:00:54.863524 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6ndjj/crc-debug-h8w9r" event={"ID":"6cd87b4f-9808-4c98-828d-ce4fdb1d1960","Type":"ContainerDied","Data":"e245d9b6b5ced786e2aa27b689797e9c33be0f7dcadea5439aad8bd0215f3d62"} Dec 11 10:00:56 crc kubenswrapper[4881]: I1211 10:00:56.005638 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6ndjj/crc-debug-h8w9r" Dec 11 10:00:56 crc kubenswrapper[4881]: I1211 10:00:56.042821 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6ndjj/crc-debug-h8w9r"] Dec 11 10:00:56 crc kubenswrapper[4881]: I1211 10:00:56.052929 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6ndjj/crc-debug-h8w9r"] Dec 11 10:00:56 crc kubenswrapper[4881]: I1211 10:00:56.143218 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6cd87b4f-9808-4c98-828d-ce4fdb1d1960-host\") pod \"6cd87b4f-9808-4c98-828d-ce4fdb1d1960\" (UID: \"6cd87b4f-9808-4c98-828d-ce4fdb1d1960\") " Dec 11 10:00:56 crc kubenswrapper[4881]: I1211 10:00:56.143463 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6cd87b4f-9808-4c98-828d-ce4fdb1d1960-host" (OuterVolumeSpecName: "host") pod "6cd87b4f-9808-4c98-828d-ce4fdb1d1960" (UID: "6cd87b4f-9808-4c98-828d-ce4fdb1d1960"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:00:56 crc kubenswrapper[4881]: I1211 10:00:56.143494 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gp679\" (UniqueName: \"kubernetes.io/projected/6cd87b4f-9808-4c98-828d-ce4fdb1d1960-kube-api-access-gp679\") pod \"6cd87b4f-9808-4c98-828d-ce4fdb1d1960\" (UID: \"6cd87b4f-9808-4c98-828d-ce4fdb1d1960\") " Dec 11 10:00:56 crc kubenswrapper[4881]: I1211 10:00:56.144306 4881 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6cd87b4f-9808-4c98-828d-ce4fdb1d1960-host\") on node \"crc\" DevicePath \"\"" Dec 11 10:00:56 crc kubenswrapper[4881]: I1211 10:00:56.153666 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cd87b4f-9808-4c98-828d-ce4fdb1d1960-kube-api-access-gp679" (OuterVolumeSpecName: "kube-api-access-gp679") pod "6cd87b4f-9808-4c98-828d-ce4fdb1d1960" (UID: "6cd87b4f-9808-4c98-828d-ce4fdb1d1960"). InnerVolumeSpecName "kube-api-access-gp679". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:00:56 crc kubenswrapper[4881]: I1211 10:00:56.247081 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gp679\" (UniqueName: \"kubernetes.io/projected/6cd87b4f-9808-4c98-828d-ce4fdb1d1960-kube-api-access-gp679\") on node \"crc\" DevicePath \"\"" Dec 11 10:00:56 crc kubenswrapper[4881]: I1211 10:00:56.884941 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1a57fd6f62b2369aac0afb5703b15327e7113f78b2b560228eda07c24a67db05" Dec 11 10:00:56 crc kubenswrapper[4881]: I1211 10:00:56.884967 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6ndjj/crc-debug-h8w9r" Dec 11 10:00:57 crc kubenswrapper[4881]: I1211 10:00:57.018634 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cd87b4f-9808-4c98-828d-ce4fdb1d1960" path="/var/lib/kubelet/pods/6cd87b4f-9808-4c98-828d-ce4fdb1d1960/volumes" Dec 11 10:00:57 crc kubenswrapper[4881]: I1211 10:00:57.240814 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6ndjj/crc-debug-2gr5g"] Dec 11 10:00:57 crc kubenswrapper[4881]: E1211 10:00:57.242034 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cd87b4f-9808-4c98-828d-ce4fdb1d1960" containerName="container-00" Dec 11 10:00:57 crc kubenswrapper[4881]: I1211 10:00:57.242057 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cd87b4f-9808-4c98-828d-ce4fdb1d1960" containerName="container-00" Dec 11 10:00:57 crc kubenswrapper[4881]: E1211 10:00:57.242102 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec312bbf-dc76-4090-b595-1c0fe22592ee" containerName="collect-profiles" Dec 11 10:00:57 crc kubenswrapper[4881]: I1211 10:00:57.242109 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec312bbf-dc76-4090-b595-1c0fe22592ee" containerName="collect-profiles" Dec 11 10:00:57 crc kubenswrapper[4881]: I1211 10:00:57.242368 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cd87b4f-9808-4c98-828d-ce4fdb1d1960" containerName="container-00" Dec 11 10:00:57 crc kubenswrapper[4881]: I1211 10:00:57.242417 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec312bbf-dc76-4090-b595-1c0fe22592ee" containerName="collect-profiles" Dec 11 10:00:57 crc kubenswrapper[4881]: I1211 10:00:57.243130 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6ndjj/crc-debug-2gr5g" Dec 11 10:00:57 crc kubenswrapper[4881]: I1211 10:00:57.373118 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6s2w9\" (UniqueName: \"kubernetes.io/projected/0399d042-2e43-4674-b9c7-ca762cd23fb4-kube-api-access-6s2w9\") pod \"crc-debug-2gr5g\" (UID: \"0399d042-2e43-4674-b9c7-ca762cd23fb4\") " pod="openshift-must-gather-6ndjj/crc-debug-2gr5g" Dec 11 10:00:57 crc kubenswrapper[4881]: I1211 10:00:57.373390 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0399d042-2e43-4674-b9c7-ca762cd23fb4-host\") pod \"crc-debug-2gr5g\" (UID: \"0399d042-2e43-4674-b9c7-ca762cd23fb4\") " pod="openshift-must-gather-6ndjj/crc-debug-2gr5g" Dec 11 10:00:57 crc kubenswrapper[4881]: I1211 10:00:57.476806 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0399d042-2e43-4674-b9c7-ca762cd23fb4-host\") pod \"crc-debug-2gr5g\" (UID: \"0399d042-2e43-4674-b9c7-ca762cd23fb4\") " pod="openshift-must-gather-6ndjj/crc-debug-2gr5g" Dec 11 10:00:57 crc kubenswrapper[4881]: I1211 10:00:57.476965 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0399d042-2e43-4674-b9c7-ca762cd23fb4-host\") pod \"crc-debug-2gr5g\" (UID: \"0399d042-2e43-4674-b9c7-ca762cd23fb4\") " pod="openshift-must-gather-6ndjj/crc-debug-2gr5g" Dec 11 10:00:57 crc kubenswrapper[4881]: I1211 10:00:57.477323 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6s2w9\" (UniqueName: \"kubernetes.io/projected/0399d042-2e43-4674-b9c7-ca762cd23fb4-kube-api-access-6s2w9\") pod \"crc-debug-2gr5g\" (UID: \"0399d042-2e43-4674-b9c7-ca762cd23fb4\") " pod="openshift-must-gather-6ndjj/crc-debug-2gr5g" Dec 11 10:00:57 crc kubenswrapper[4881]: I1211 10:00:57.496513 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6s2w9\" (UniqueName: \"kubernetes.io/projected/0399d042-2e43-4674-b9c7-ca762cd23fb4-kube-api-access-6s2w9\") pod \"crc-debug-2gr5g\" (UID: \"0399d042-2e43-4674-b9c7-ca762cd23fb4\") " pod="openshift-must-gather-6ndjj/crc-debug-2gr5g" Dec 11 10:00:57 crc kubenswrapper[4881]: I1211 10:00:57.562013 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6ndjj/crc-debug-2gr5g" Dec 11 10:00:57 crc kubenswrapper[4881]: W1211 10:00:57.597997 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0399d042_2e43_4674_b9c7_ca762cd23fb4.slice/crio-2b61b4d2e49cf181ea9061efbf31c1de27f4393c9ee01f7d649e497cf2f17519 WatchSource:0}: Error finding container 2b61b4d2e49cf181ea9061efbf31c1de27f4393c9ee01f7d649e497cf2f17519: Status 404 returned error can't find the container with id 2b61b4d2e49cf181ea9061efbf31c1de27f4393c9ee01f7d649e497cf2f17519 Dec 11 10:00:57 crc kubenswrapper[4881]: I1211 10:00:57.896261 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6ndjj/crc-debug-2gr5g" event={"ID":"0399d042-2e43-4674-b9c7-ca762cd23fb4","Type":"ContainerStarted","Data":"2b61b4d2e49cf181ea9061efbf31c1de27f4393c9ee01f7d649e497cf2f17519"} Dec 11 10:00:58 crc kubenswrapper[4881]: I1211 10:00:58.907040 4881 generic.go:334] "Generic (PLEG): container finished" podID="0399d042-2e43-4674-b9c7-ca762cd23fb4" containerID="58d0d316cd4cc921f5269a28c9e8243f103e11a9dd54777c42d900a6c589f5ab" exitCode=0 Dec 11 10:00:58 crc kubenswrapper[4881]: I1211 10:00:58.907137 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6ndjj/crc-debug-2gr5g" event={"ID":"0399d042-2e43-4674-b9c7-ca762cd23fb4","Type":"ContainerDied","Data":"58d0d316cd4cc921f5269a28c9e8243f103e11a9dd54777c42d900a6c589f5ab"} Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.053936 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6ndjj/crc-debug-2gr5g" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.162346 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29424121-wd29n"] Dec 11 10:01:00 crc kubenswrapper[4881]: E1211 10:01:00.163127 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0399d042-2e43-4674-b9c7-ca762cd23fb4" containerName="container-00" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.163156 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="0399d042-2e43-4674-b9c7-ca762cd23fb4" containerName="container-00" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.163637 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="0399d042-2e43-4674-b9c7-ca762cd23fb4" containerName="container-00" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.165547 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29424121-wd29n" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.190161 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29424121-wd29n"] Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.238174 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0399d042-2e43-4674-b9c7-ca762cd23fb4-host\") pod \"0399d042-2e43-4674-b9c7-ca762cd23fb4\" (UID: \"0399d042-2e43-4674-b9c7-ca762cd23fb4\") " Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.238240 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6s2w9\" (UniqueName: \"kubernetes.io/projected/0399d042-2e43-4674-b9c7-ca762cd23fb4-kube-api-access-6s2w9\") pod \"0399d042-2e43-4674-b9c7-ca762cd23fb4\" (UID: \"0399d042-2e43-4674-b9c7-ca762cd23fb4\") " Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.238435 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0399d042-2e43-4674-b9c7-ca762cd23fb4-host" (OuterVolumeSpecName: "host") pod "0399d042-2e43-4674-b9c7-ca762cd23fb4" (UID: "0399d042-2e43-4674-b9c7-ca762cd23fb4"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.239046 4881 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0399d042-2e43-4674-b9c7-ca762cd23fb4-host\") on node \"crc\" DevicePath \"\"" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.272207 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0399d042-2e43-4674-b9c7-ca762cd23fb4-kube-api-access-6s2w9" (OuterVolumeSpecName: "kube-api-access-6s2w9") pod "0399d042-2e43-4674-b9c7-ca762cd23fb4" (UID: "0399d042-2e43-4674-b9c7-ca762cd23fb4"). InnerVolumeSpecName "kube-api-access-6s2w9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.340626 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-fernet-keys\") pod \"keystone-cron-29424121-wd29n\" (UID: \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\") " pod="openstack/keystone-cron-29424121-wd29n" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.340701 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txsq7\" (UniqueName: \"kubernetes.io/projected/b7f4b194-07cf-4a48-ab95-f0aece6d4576-kube-api-access-txsq7\") pod \"keystone-cron-29424121-wd29n\" (UID: \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\") " pod="openstack/keystone-cron-29424121-wd29n" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.340731 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-combined-ca-bundle\") pod \"keystone-cron-29424121-wd29n\" (UID: \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\") " pod="openstack/keystone-cron-29424121-wd29n" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.340826 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-config-data\") pod \"keystone-cron-29424121-wd29n\" (UID: \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\") " pod="openstack/keystone-cron-29424121-wd29n" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.340956 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6s2w9\" (UniqueName: \"kubernetes.io/projected/0399d042-2e43-4674-b9c7-ca762cd23fb4-kube-api-access-6s2w9\") on node \"crc\" DevicePath \"\"" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.442430 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-config-data\") pod \"keystone-cron-29424121-wd29n\" (UID: \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\") " pod="openstack/keystone-cron-29424121-wd29n" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.442626 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-fernet-keys\") pod \"keystone-cron-29424121-wd29n\" (UID: \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\") " pod="openstack/keystone-cron-29424121-wd29n" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.442675 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txsq7\" (UniqueName: \"kubernetes.io/projected/b7f4b194-07cf-4a48-ab95-f0aece6d4576-kube-api-access-txsq7\") pod \"keystone-cron-29424121-wd29n\" (UID: \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\") " pod="openstack/keystone-cron-29424121-wd29n" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.442712 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-combined-ca-bundle\") pod \"keystone-cron-29424121-wd29n\" (UID: \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\") " pod="openstack/keystone-cron-29424121-wd29n" Dec 11 10:01:00 crc 
kubenswrapper[4881]: I1211 10:01:00.448567 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-fernet-keys\") pod \"keystone-cron-29424121-wd29n\" (UID: \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\") " pod="openstack/keystone-cron-29424121-wd29n" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.448621 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-config-data\") pod \"keystone-cron-29424121-wd29n\" (UID: \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\") " pod="openstack/keystone-cron-29424121-wd29n" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.458859 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-combined-ca-bundle\") pod \"keystone-cron-29424121-wd29n\" (UID: \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\") " pod="openstack/keystone-cron-29424121-wd29n" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.459571 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txsq7\" (UniqueName: \"kubernetes.io/projected/b7f4b194-07cf-4a48-ab95-f0aece6d4576-kube-api-access-txsq7\") pod \"keystone-cron-29424121-wd29n\" (UID: \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\") " pod="openstack/keystone-cron-29424121-wd29n" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.500554 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29424121-wd29n" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.934507 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6ndjj/crc-debug-2gr5g" event={"ID":"0399d042-2e43-4674-b9c7-ca762cd23fb4","Type":"ContainerDied","Data":"2b61b4d2e49cf181ea9061efbf31c1de27f4393c9ee01f7d649e497cf2f17519"} Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.935199 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2b61b4d2e49cf181ea9061efbf31c1de27f4393c9ee01f7d649e497cf2f17519" Dec 11 10:01:00 crc kubenswrapper[4881]: I1211 10:01:00.934550 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6ndjj/crc-debug-2gr5g" Dec 11 10:01:01 crc kubenswrapper[4881]: I1211 10:01:01.046847 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29424121-wd29n"] Dec 11 10:01:01 crc kubenswrapper[4881]: W1211 10:01:01.052720 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb7f4b194_07cf_4a48_ab95_f0aece6d4576.slice/crio-cf66fbb2032c38e7872c0f8eb0076198853cd1e7340bec6f4b7ec74bfe80ff40 WatchSource:0}: Error finding container cf66fbb2032c38e7872c0f8eb0076198853cd1e7340bec6f4b7ec74bfe80ff40: Status 404 returned error can't find the container with id cf66fbb2032c38e7872c0f8eb0076198853cd1e7340bec6f4b7ec74bfe80ff40 Dec 11 10:01:01 crc kubenswrapper[4881]: I1211 10:01:01.444771 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6ndjj/crc-debug-2gr5g"] Dec 11 10:01:01 crc kubenswrapper[4881]: I1211 10:01:01.453697 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6ndjj/crc-debug-2gr5g"] Dec 11 10:01:01 crc kubenswrapper[4881]: I1211 10:01:01.948203 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29424121-wd29n" event={"ID":"b7f4b194-07cf-4a48-ab95-f0aece6d4576","Type":"ContainerStarted","Data":"9900f779cd57dbc4e1bc3c5ed7ce590001fe6e403611715db6dcda68e67c9d08"} Dec 11 10:01:01 crc kubenswrapper[4881]: I1211 10:01:01.948257 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29424121-wd29n" event={"ID":"b7f4b194-07cf-4a48-ab95-f0aece6d4576","Type":"ContainerStarted","Data":"cf66fbb2032c38e7872c0f8eb0076198853cd1e7340bec6f4b7ec74bfe80ff40"} Dec 11 10:01:01 crc kubenswrapper[4881]: I1211 10:01:01.973270 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29424121-wd29n" podStartSLOduration=1.9732510909999998 podStartE2EDuration="1.973251091s" podCreationTimestamp="2025-12-11 10:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:01:01.963505882 +0000 UTC m=+6310.340874599" watchObservedRunningTime="2025-12-11 10:01:01.973251091 +0000 UTC m=+6310.350619788" Dec 11 10:01:02 crc kubenswrapper[4881]: I1211 10:01:02.635045 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6ndjj/crc-debug-l7w75"] Dec 11 10:01:02 crc kubenswrapper[4881]: I1211 10:01:02.637127 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6ndjj/crc-debug-l7w75" Dec 11 10:01:02 crc kubenswrapper[4881]: I1211 10:01:02.806627 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tj5b8\" (UniqueName: \"kubernetes.io/projected/b2d415e3-40dd-443c-a9ba-8c7aae086b1a-kube-api-access-tj5b8\") pod \"crc-debug-l7w75\" (UID: \"b2d415e3-40dd-443c-a9ba-8c7aae086b1a\") " pod="openshift-must-gather-6ndjj/crc-debug-l7w75" Dec 11 10:01:02 crc kubenswrapper[4881]: I1211 10:01:02.806739 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b2d415e3-40dd-443c-a9ba-8c7aae086b1a-host\") pod \"crc-debug-l7w75\" (UID: \"b2d415e3-40dd-443c-a9ba-8c7aae086b1a\") " pod="openshift-must-gather-6ndjj/crc-debug-l7w75" Dec 11 10:01:02 crc kubenswrapper[4881]: I1211 10:01:02.910272 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tj5b8\" (UniqueName: \"kubernetes.io/projected/b2d415e3-40dd-443c-a9ba-8c7aae086b1a-kube-api-access-tj5b8\") pod \"crc-debug-l7w75\" (UID: \"b2d415e3-40dd-443c-a9ba-8c7aae086b1a\") " pod="openshift-must-gather-6ndjj/crc-debug-l7w75" Dec 11 10:01:02 crc kubenswrapper[4881]: I1211 10:01:02.910353 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b2d415e3-40dd-443c-a9ba-8c7aae086b1a-host\") pod \"crc-debug-l7w75\" (UID: \"b2d415e3-40dd-443c-a9ba-8c7aae086b1a\") " pod="openshift-must-gather-6ndjj/crc-debug-l7w75" Dec 11 10:01:02 crc kubenswrapper[4881]: I1211 10:01:02.910669 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b2d415e3-40dd-443c-a9ba-8c7aae086b1a-host\") pod \"crc-debug-l7w75\" (UID: \"b2d415e3-40dd-443c-a9ba-8c7aae086b1a\") " pod="openshift-must-gather-6ndjj/crc-debug-l7w75" Dec 11 10:01:02 crc kubenswrapper[4881]: I1211 10:01:02.930917 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tj5b8\" (UniqueName: \"kubernetes.io/projected/b2d415e3-40dd-443c-a9ba-8c7aae086b1a-kube-api-access-tj5b8\") pod \"crc-debug-l7w75\" (UID: \"b2d415e3-40dd-443c-a9ba-8c7aae086b1a\") " pod="openshift-must-gather-6ndjj/crc-debug-l7w75" Dec 11 10:01:02 crc kubenswrapper[4881]: I1211 10:01:02.959725 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6ndjj/crc-debug-l7w75" Dec 11 10:01:02 crc kubenswrapper[4881]: W1211 10:01:02.995821 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2d415e3_40dd_443c_a9ba_8c7aae086b1a.slice/crio-db4e8bfdde25ff0573d30961ae1f21c500e7927c07688b01030e9894dccdda0e WatchSource:0}: Error finding container db4e8bfdde25ff0573d30961ae1f21c500e7927c07688b01030e9894dccdda0e: Status 404 returned error can't find the container with id db4e8bfdde25ff0573d30961ae1f21c500e7927c07688b01030e9894dccdda0e Dec 11 10:01:03 crc kubenswrapper[4881]: I1211 10:01:03.026668 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0399d042-2e43-4674-b9c7-ca762cd23fb4" path="/var/lib/kubelet/pods/0399d042-2e43-4674-b9c7-ca762cd23fb4/volumes" Dec 11 10:01:03 crc kubenswrapper[4881]: I1211 10:01:03.969204 4881 generic.go:334] "Generic (PLEG): container finished" podID="b2d415e3-40dd-443c-a9ba-8c7aae086b1a" containerID="0426fa73eb3f0323e4350e3a0bfcd0ed4a5dda2f2c94599680b14c723b5e07ed" exitCode=0 Dec 11 10:01:03 crc kubenswrapper[4881]: I1211 10:01:03.969325 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6ndjj/crc-debug-l7w75" event={"ID":"b2d415e3-40dd-443c-a9ba-8c7aae086b1a","Type":"ContainerDied","Data":"0426fa73eb3f0323e4350e3a0bfcd0ed4a5dda2f2c94599680b14c723b5e07ed"} Dec 11 10:01:03 crc kubenswrapper[4881]: I1211 10:01:03.969886 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6ndjj/crc-debug-l7w75" event={"ID":"b2d415e3-40dd-443c-a9ba-8c7aae086b1a","Type":"ContainerStarted","Data":"db4e8bfdde25ff0573d30961ae1f21c500e7927c07688b01030e9894dccdda0e"} Dec 11 10:01:04 crc kubenswrapper[4881]: I1211 10:01:04.018697 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6ndjj/crc-debug-l7w75"] Dec 11 10:01:04 crc kubenswrapper[4881]: I1211 10:01:04.032559 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6ndjj/crc-debug-l7w75"] Dec 11 10:01:04 crc kubenswrapper[4881]: I1211 10:01:04.981917 4881 generic.go:334] "Generic (PLEG): container finished" podID="b7f4b194-07cf-4a48-ab95-f0aece6d4576" containerID="9900f779cd57dbc4e1bc3c5ed7ce590001fe6e403611715db6dcda68e67c9d08" exitCode=0 Dec 11 10:01:04 crc kubenswrapper[4881]: I1211 10:01:04.982160 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29424121-wd29n" event={"ID":"b7f4b194-07cf-4a48-ab95-f0aece6d4576","Type":"ContainerDied","Data":"9900f779cd57dbc4e1bc3c5ed7ce590001fe6e403611715db6dcda68e67c9d08"} Dec 11 10:01:05 crc kubenswrapper[4881]: I1211 10:01:05.131629 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6ndjj/crc-debug-l7w75" Dec 11 10:01:05 crc kubenswrapper[4881]: I1211 10:01:05.171873 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b2d415e3-40dd-443c-a9ba-8c7aae086b1a-host\") pod \"b2d415e3-40dd-443c-a9ba-8c7aae086b1a\" (UID: \"b2d415e3-40dd-443c-a9ba-8c7aae086b1a\") " Dec 11 10:01:05 crc kubenswrapper[4881]: I1211 10:01:05.171972 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tj5b8\" (UniqueName: \"kubernetes.io/projected/b2d415e3-40dd-443c-a9ba-8c7aae086b1a-kube-api-access-tj5b8\") pod \"b2d415e3-40dd-443c-a9ba-8c7aae086b1a\" (UID: \"b2d415e3-40dd-443c-a9ba-8c7aae086b1a\") " Dec 11 10:01:05 crc kubenswrapper[4881]: I1211 10:01:05.171999 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b2d415e3-40dd-443c-a9ba-8c7aae086b1a-host" (OuterVolumeSpecName: "host") pod "b2d415e3-40dd-443c-a9ba-8c7aae086b1a" (UID: "b2d415e3-40dd-443c-a9ba-8c7aae086b1a"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:01:05 crc kubenswrapper[4881]: I1211 10:01:05.172938 4881 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b2d415e3-40dd-443c-a9ba-8c7aae086b1a-host\") on node \"crc\" DevicePath \"\"" Dec 11 10:01:05 crc kubenswrapper[4881]: I1211 10:01:05.178129 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2d415e3-40dd-443c-a9ba-8c7aae086b1a-kube-api-access-tj5b8" (OuterVolumeSpecName: "kube-api-access-tj5b8") pod "b2d415e3-40dd-443c-a9ba-8c7aae086b1a" (UID: "b2d415e3-40dd-443c-a9ba-8c7aae086b1a"). InnerVolumeSpecName "kube-api-access-tj5b8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:01:05 crc kubenswrapper[4881]: I1211 10:01:05.274858 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tj5b8\" (UniqueName: \"kubernetes.io/projected/b2d415e3-40dd-443c-a9ba-8c7aae086b1a-kube-api-access-tj5b8\") on node \"crc\" DevicePath \"\"" Dec 11 10:01:05 crc kubenswrapper[4881]: I1211 10:01:05.999532 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6ndjj/crc-debug-l7w75" Dec 11 10:01:06 crc kubenswrapper[4881]: I1211 10:01:05.999533 4881 scope.go:117] "RemoveContainer" containerID="0426fa73eb3f0323e4350e3a0bfcd0ed4a5dda2f2c94599680b14c723b5e07ed" Dec 11 10:01:06 crc kubenswrapper[4881]: I1211 10:01:06.415804 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29424121-wd29n" Dec 11 10:01:06 crc kubenswrapper[4881]: I1211 10:01:06.520070 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-fernet-keys\") pod \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\" (UID: \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\") " Dec 11 10:01:06 crc kubenswrapper[4881]: I1211 10:01:06.520233 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-combined-ca-bundle\") pod \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\" (UID: \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\") " Dec 11 10:01:06 crc kubenswrapper[4881]: I1211 10:01:06.520298 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-config-data\") pod \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\" (UID: \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\") " Dec 11 10:01:06 crc kubenswrapper[4881]: I1211 10:01:06.520363 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-txsq7\" (UniqueName: \"kubernetes.io/projected/b7f4b194-07cf-4a48-ab95-f0aece6d4576-kube-api-access-txsq7\") pod \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\" (UID: \"b7f4b194-07cf-4a48-ab95-f0aece6d4576\") " Dec 11 10:01:06 crc kubenswrapper[4881]: I1211 10:01:06.526719 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b7f4b194-07cf-4a48-ab95-f0aece6d4576" (UID: "b7f4b194-07cf-4a48-ab95-f0aece6d4576"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:01:06 crc kubenswrapper[4881]: I1211 10:01:06.527348 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7f4b194-07cf-4a48-ab95-f0aece6d4576-kube-api-access-txsq7" (OuterVolumeSpecName: "kube-api-access-txsq7") pod "b7f4b194-07cf-4a48-ab95-f0aece6d4576" (UID: "b7f4b194-07cf-4a48-ab95-f0aece6d4576"). InnerVolumeSpecName "kube-api-access-txsq7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:01:06 crc kubenswrapper[4881]: I1211 10:01:06.557213 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b7f4b194-07cf-4a48-ab95-f0aece6d4576" (UID: "b7f4b194-07cf-4a48-ab95-f0aece6d4576"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:01:06 crc kubenswrapper[4881]: I1211 10:01:06.597744 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-config-data" (OuterVolumeSpecName: "config-data") pod "b7f4b194-07cf-4a48-ab95-f0aece6d4576" (UID: "b7f4b194-07cf-4a48-ab95-f0aece6d4576"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:01:06 crc kubenswrapper[4881]: I1211 10:01:06.624128 4881 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 11 10:01:06 crc kubenswrapper[4881]: I1211 10:01:06.624504 4881 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:01:06 crc kubenswrapper[4881]: I1211 10:01:06.624601 4881 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7f4b194-07cf-4a48-ab95-f0aece6d4576-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:01:06 crc kubenswrapper[4881]: I1211 10:01:06.624683 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-txsq7\" (UniqueName: \"kubernetes.io/projected/b7f4b194-07cf-4a48-ab95-f0aece6d4576-kube-api-access-txsq7\") on node \"crc\" DevicePath \"\"" Dec 11 10:01:07 crc kubenswrapper[4881]: I1211 10:01:07.019216 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29424121-wd29n" Dec 11 10:01:07 crc kubenswrapper[4881]: I1211 10:01:07.026249 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2d415e3-40dd-443c-a9ba-8c7aae086b1a" path="/var/lib/kubelet/pods/b2d415e3-40dd-443c-a9ba-8c7aae086b1a/volumes" Dec 11 10:01:07 crc kubenswrapper[4881]: I1211 10:01:07.027613 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29424121-wd29n" event={"ID":"b7f4b194-07cf-4a48-ab95-f0aece6d4576","Type":"ContainerDied","Data":"cf66fbb2032c38e7872c0f8eb0076198853cd1e7340bec6f4b7ec74bfe80ff40"} Dec 11 10:01:07 crc kubenswrapper[4881]: I1211 10:01:07.027651 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cf66fbb2032c38e7872c0f8eb0076198853cd1e7340bec6f4b7ec74bfe80ff40" Dec 11 10:01:13 crc kubenswrapper[4881]: I1211 10:01:13.587319 4881 scope.go:117] "RemoveContainer" containerID="4c1403fd15e2a6be22fc90609ab4a47f5e7549cd45b4955509a51a5f1752d1c4" Dec 11 10:01:13 crc kubenswrapper[4881]: I1211 10:01:13.623064 4881 scope.go:117] "RemoveContainer" containerID="440db367b56a84c94950929a05a3bf53be243b40913cf0c7d07121a76a385ba7" Dec 11 10:01:29 crc kubenswrapper[4881]: I1211 10:01:29.397538 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:01:29 crc kubenswrapper[4881]: I1211 10:01:29.398126 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:01:30 crc kubenswrapper[4881]: I1211 10:01:30.173745 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e25df4be-6d20-469b-999e-ae4ffe346be8/aodh-api/0.log" Dec 11 10:01:30 crc kubenswrapper[4881]: I1211 10:01:30.444599 4881 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_aodh-0_e25df4be-6d20-469b-999e-ae4ffe346be8/aodh-listener/0.log" Dec 11 10:01:30 crc kubenswrapper[4881]: I1211 10:01:30.469146 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e25df4be-6d20-469b-999e-ae4ffe346be8/aodh-notifier/0.log" Dec 11 10:01:30 crc kubenswrapper[4881]: I1211 10:01:30.501143 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e25df4be-6d20-469b-999e-ae4ffe346be8/aodh-evaluator/0.log" Dec 11 10:01:30 crc kubenswrapper[4881]: I1211 10:01:30.685315 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-67b8f6bb8b-gk4v6_24358cec-f24b-4eeb-ad37-069245596b56/barbican-api/0.log" Dec 11 10:01:30 crc kubenswrapper[4881]: I1211 10:01:30.710792 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-67b8f6bb8b-gk4v6_24358cec-f24b-4eeb-ad37-069245596b56/barbican-api-log/0.log" Dec 11 10:01:30 crc kubenswrapper[4881]: I1211 10:01:30.976302 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5766655fb4-qmcpd_b3f6375d-3379-4a1a-b875-286687315947/barbican-keystone-listener/0.log" Dec 11 10:01:31 crc kubenswrapper[4881]: I1211 10:01:31.103548 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5766655fb4-qmcpd_b3f6375d-3379-4a1a-b875-286687315947/barbican-keystone-listener-log/0.log" Dec 11 10:01:31 crc kubenswrapper[4881]: I1211 10:01:31.169372 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-9778dfbb5-mls2j_0fec4cf8-f794-4f69-9645-38b0dd1ef593/barbican-worker/0.log" Dec 11 10:01:31 crc kubenswrapper[4881]: I1211 10:01:31.301319 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-9778dfbb5-mls2j_0fec4cf8-f794-4f69-9645-38b0dd1ef593/barbican-worker-log/0.log" Dec 11 10:01:31 crc kubenswrapper[4881]: I1211 10:01:31.432616 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr_e741b94a-ed71-4819-ba06-943aa25aaaf8/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:31 crc kubenswrapper[4881]: I1211 10:01:31.617314 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_b51fa237-35ec-47d6-b61d-c3e50dc8450f/ceilometer-notification-agent/0.log" Dec 11 10:01:31 crc kubenswrapper[4881]: I1211 10:01:31.679362 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_b51fa237-35ec-47d6-b61d-c3e50dc8450f/ceilometer-central-agent/0.log" Dec 11 10:01:31 crc kubenswrapper[4881]: I1211 10:01:31.732098 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_b51fa237-35ec-47d6-b61d-c3e50dc8450f/proxy-httpd/0.log" Dec 11 10:01:31 crc kubenswrapper[4881]: I1211 10:01:31.855373 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_b51fa237-35ec-47d6-b61d-c3e50dc8450f/sg-core/0.log" Dec 11 10:01:32 crc kubenswrapper[4881]: I1211 10:01:32.032883 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_98c33c2d-b3e5-450d-8c52-544acac89c74/cinder-api-log/0.log" Dec 11 10:01:32 crc kubenswrapper[4881]: I1211 10:01:32.091644 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_98c33c2d-b3e5-450d-8c52-544acac89c74/cinder-api/0.log" Dec 11 10:01:32 crc kubenswrapper[4881]: I1211 10:01:32.400954 4881 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e9aa88e0-71a6-40a0-92ec-88084b425df9/probe/0.log" Dec 11 10:01:32 crc kubenswrapper[4881]: I1211 10:01:32.443637 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e9aa88e0-71a6-40a0-92ec-88084b425df9/cinder-scheduler/0.log" Dec 11 10:01:32 crc kubenswrapper[4881]: I1211 10:01:32.537543 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp_7dd4872c-380b-4dcc-bd46-ad6a624a2d34/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:33 crc kubenswrapper[4881]: I1211 10:01:33.031206 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-98j7p_97426780-cfa1-43ea-9cba-e4268c17b4c3/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:33 crc kubenswrapper[4881]: I1211 10:01:33.159535 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-xpw6s_ff97623d-cd72-4130-b08b-aa41fb1f3e55/init/0.log" Dec 11 10:01:33 crc kubenswrapper[4881]: I1211 10:01:33.437019 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn_532ceac4-3c2d-4d4a-900f-498fa41192b1/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:33 crc kubenswrapper[4881]: I1211 10:01:33.438290 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-xpw6s_ff97623d-cd72-4130-b08b-aa41fb1f3e55/init/0.log" Dec 11 10:01:33 crc kubenswrapper[4881]: I1211 10:01:33.447495 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-xpw6s_ff97623d-cd72-4130-b08b-aa41fb1f3e55/dnsmasq-dns/0.log" Dec 11 10:01:33 crc kubenswrapper[4881]: I1211 10:01:33.697809 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_03bd1873-0976-4da1-a4f0-4bc1ab183cda/glance-httpd/0.log" Dec 11 10:01:33 crc kubenswrapper[4881]: I1211 10:01:33.806802 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_03bd1873-0976-4da1-a4f0-4bc1ab183cda/glance-log/0.log" Dec 11 10:01:33 crc kubenswrapper[4881]: I1211 10:01:33.921906 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_4cde142d-aa3a-4c3b-9e63-efcbce032089/glance-httpd/0.log" Dec 11 10:01:33 crc kubenswrapper[4881]: I1211 10:01:33.965501 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_4cde142d-aa3a-4c3b-9e63-efcbce032089/glance-log/0.log" Dec 11 10:01:34 crc kubenswrapper[4881]: I1211 10:01:34.667616 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-559447f984-4bfr5_6b4ff921-94ce-4083-ad5d-783a59c7fb4d/heat-engine/0.log" Dec 11 10:01:34 crc kubenswrapper[4881]: I1211 10:01:34.780433 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-pvld9_a4c5efad-5566-4a8d-85d8-c897f04fcb46/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:34 crc kubenswrapper[4881]: I1211 10:01:34.914760 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-77bc5fff7b-clttx_916eca8d-ca13-4db2-a350-b39a66bdee84/heat-api/0.log" Dec 11 10:01:35 crc kubenswrapper[4881]: I1211 10:01:35.047765 4881 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-5f8f5d98d5-xmfmx_059dc22b-b46b-482a-9a29-ded125bc4dac/heat-cfnapi/0.log" Dec 11 10:01:35 crc kubenswrapper[4881]: I1211 10:01:35.121968 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-p978t_482d67c1-02c6-4526-99fb-2bc546471c4d/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:35 crc kubenswrapper[4881]: I1211 10:01:35.409676 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29424061-884r9_5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a/keystone-cron/0.log" Dec 11 10:01:35 crc kubenswrapper[4881]: I1211 10:01:35.442111 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29424121-wd29n_b7f4b194-07cf-4a48-ab95-f0aece6d4576/keystone-cron/0.log" Dec 11 10:01:35 crc kubenswrapper[4881]: I1211 10:01:35.703997 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-7fbbb6db6c-bqwjn_cae51d9b-e997-4228-af25-872a6e16df8d/keystone-api/0.log" Dec 11 10:01:35 crc kubenswrapper[4881]: I1211 10:01:35.704314 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_dad35c24-846e-4c89-aa50-20ccea9fd132/kube-state-metrics/0.log" Dec 11 10:01:35 crc kubenswrapper[4881]: I1211 10:01:35.772119 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc_1ad81113-10d1-4110-81ad-abd39146b84c/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:35 crc kubenswrapper[4881]: I1211 10:01:35.948268 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_logging-edpm-deployment-openstack-edpm-ipam-wqjnk_7afa083c-c63d-4f07-9a8f-15b00a918860/logging-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:36 crc kubenswrapper[4881]: I1211 10:01:36.222761 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mysqld-exporter-0_598ba082-c5c4-4dc3-b4ec-5db6677fdb61/mysqld-exporter/0.log" Dec 11 10:01:36 crc kubenswrapper[4881]: I1211 10:01:36.514455 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6986b4b8b9-dlx84_8edd456d-09d4-46fc-97ef-68c44cb5320c/neutron-httpd/0.log" Dec 11 10:01:36 crc kubenswrapper[4881]: I1211 10:01:36.884769 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4_0a91cc42-4d2c-4527-81b9-7bfe0432f4f4/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:37 crc kubenswrapper[4881]: I1211 10:01:37.083492 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6986b4b8b9-dlx84_8edd456d-09d4-46fc-97ef-68c44cb5320c/neutron-api/0.log" Dec 11 10:01:37 crc kubenswrapper[4881]: I1211 10:01:37.595871 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_5bbd6724-7a9f-4aac-8ca7-199f8cba6223/nova-cell0-conductor-conductor/0.log" Dec 11 10:01:38 crc kubenswrapper[4881]: I1211 10:01:38.041957 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_a461b235-8929-4c28-a4bc-fcc40fe9ede9/nova-api-log/0.log" Dec 11 10:01:38 crc kubenswrapper[4881]: I1211 10:01:38.146571 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3/nova-cell1-conductor-conductor/0.log" Dec 11 10:01:38 crc kubenswrapper[4881]: I1211 10:01:38.356917 4881 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9/nova-cell1-novncproxy-novncproxy/0.log" Dec 11 10:01:38 crc kubenswrapper[4881]: I1211 10:01:38.528916 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-hq67p_84b496b0-b36c-4ece-ba2d-e73423d502cd/nova-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:38 crc kubenswrapper[4881]: I1211 10:01:38.793225 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_a461b235-8929-4c28-a4bc-fcc40fe9ede9/nova-api-api/0.log" Dec 11 10:01:38 crc kubenswrapper[4881]: I1211 10:01:38.808685 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_af5edf02-ece3-42cd-b5da-a84f734d2505/nova-metadata-log/0.log" Dec 11 10:01:39 crc kubenswrapper[4881]: I1211 10:01:39.388106 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_b12d1174-33a2-4075-8cc6-bd591d290563/nova-scheduler-scheduler/0.log" Dec 11 10:01:39 crc kubenswrapper[4881]: I1211 10:01:39.392169 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_70402eec-968d-4ceb-b259-5e2508ee21a0/mysql-bootstrap/0.log" Dec 11 10:01:39 crc kubenswrapper[4881]: I1211 10:01:39.502896 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_70402eec-968d-4ceb-b259-5e2508ee21a0/mysql-bootstrap/0.log" Dec 11 10:01:39 crc kubenswrapper[4881]: I1211 10:01:39.641392 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_70402eec-968d-4ceb-b259-5e2508ee21a0/galera/0.log" Dec 11 10:01:39 crc kubenswrapper[4881]: I1211 10:01:39.787592 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_7a825abb-23ec-4f51-940d-2500da233e14/mysql-bootstrap/0.log" Dec 11 10:01:40 crc kubenswrapper[4881]: I1211 10:01:40.167119 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_7a825abb-23ec-4f51-940d-2500da233e14/mysql-bootstrap/0.log" Dec 11 10:01:40 crc kubenswrapper[4881]: I1211 10:01:40.203766 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_7a825abb-23ec-4f51-940d-2500da233e14/galera/0.log" Dec 11 10:01:40 crc kubenswrapper[4881]: I1211 10:01:40.516119 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf/openstackclient/0.log" Dec 11 10:01:41 crc kubenswrapper[4881]: I1211 10:01:41.021784 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-rmmgk_cec096de-5459-4769-9e87-9a3f54d3e8dc/openstack-network-exporter/0.log" Dec 11 10:01:41 crc kubenswrapper[4881]: I1211 10:01:41.171201 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-5gqh8_ede1ec9d-4207-4a9c-ba57-3f2037f68632/ovsdb-server-init/0.log" Dec 11 10:01:41 crc kubenswrapper[4881]: I1211 10:01:41.450396 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-5gqh8_ede1ec9d-4207-4a9c-ba57-3f2037f68632/ovsdb-server-init/0.log" Dec 11 10:01:41 crc kubenswrapper[4881]: I1211 10:01:41.503457 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-5gqh8_ede1ec9d-4207-4a9c-ba57-3f2037f68632/ovs-vswitchd/0.log" Dec 11 10:01:41 crc kubenswrapper[4881]: I1211 
10:01:41.508404 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-5gqh8_ede1ec9d-4207-4a9c-ba57-3f2037f68632/ovsdb-server/0.log" Dec 11 10:01:41 crc kubenswrapper[4881]: I1211 10:01:41.748426 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-xfltd_49d6015f-9f76-4e77-821e-2a11887e497c/ovn-controller/0.log" Dec 11 10:01:41 crc kubenswrapper[4881]: I1211 10:01:41.810643 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_af5edf02-ece3-42cd-b5da-a84f734d2505/nova-metadata-metadata/0.log" Dec 11 10:01:42 crc kubenswrapper[4881]: I1211 10:01:42.040180 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_ac1565ce-1297-4689-b199-d88c339feb68/openstack-network-exporter/0.log" Dec 11 10:01:42 crc kubenswrapper[4881]: I1211 10:01:42.080166 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-tvdms_9b70ae00-542c-47d9-b985-5fc2433218a5/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:42 crc kubenswrapper[4881]: I1211 10:01:42.175677 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_ac1565ce-1297-4689-b199-d88c339feb68/ovn-northd/0.log" Dec 11 10:01:42 crc kubenswrapper[4881]: I1211 10:01:42.317146 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_7f0aa090-3aac-4da8-9efa-a31a7b3b130f/openstack-network-exporter/0.log" Dec 11 10:01:42 crc kubenswrapper[4881]: I1211 10:01:42.415412 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_7f0aa090-3aac-4da8-9efa-a31a7b3b130f/ovsdbserver-nb/0.log" Dec 11 10:01:42 crc kubenswrapper[4881]: I1211 10:01:42.661266 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_b9b67c1c-0e11-4c19-8d1f-6c046375659c/ovsdbserver-sb/0.log" Dec 11 10:01:42 crc kubenswrapper[4881]: I1211 10:01:42.688727 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_b9b67c1c-0e11-4c19-8d1f-6c046375659c/openstack-network-exporter/0.log" Dec 11 10:01:43 crc kubenswrapper[4881]: I1211 10:01:43.009896 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-55fbb6c694-gw7p4_30f691dc-faf6-411b-8cb8-db57047199b0/placement-api/0.log" Dec 11 10:01:43 crc kubenswrapper[4881]: I1211 10:01:43.027751 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_b515a685-da3e-4d92-a8e5-60561e9de83f/init-config-reloader/0.log" Dec 11 10:01:43 crc kubenswrapper[4881]: I1211 10:01:43.062834 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-55fbb6c694-gw7p4_30f691dc-faf6-411b-8cb8-db57047199b0/placement-log/0.log" Dec 11 10:01:43 crc kubenswrapper[4881]: I1211 10:01:43.224995 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_b515a685-da3e-4d92-a8e5-60561e9de83f/config-reloader/0.log" Dec 11 10:01:43 crc kubenswrapper[4881]: I1211 10:01:43.300663 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_b515a685-da3e-4d92-a8e5-60561e9de83f/thanos-sidecar/0.log" Dec 11 10:01:43 crc kubenswrapper[4881]: I1211 10:01:43.309325 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_b515a685-da3e-4d92-a8e5-60561e9de83f/prometheus/0.log" Dec 11 10:01:43 crc 
kubenswrapper[4881]: I1211 10:01:43.315865 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_b515a685-da3e-4d92-a8e5-60561e9de83f/init-config-reloader/0.log" Dec 11 10:01:43 crc kubenswrapper[4881]: I1211 10:01:43.570868 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_acc39512-3a6a-4e4c-a2a2-a13ad13b11f0/setup-container/0.log" Dec 11 10:01:43 crc kubenswrapper[4881]: I1211 10:01:43.769673 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d52ebbc7-03f0-4f73-827b-8f8066e83146/setup-container/0.log" Dec 11 10:01:43 crc kubenswrapper[4881]: I1211 10:01:43.805701 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_acc39512-3a6a-4e4c-a2a2-a13ad13b11f0/setup-container/0.log" Dec 11 10:01:43 crc kubenswrapper[4881]: I1211 10:01:43.846847 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_acc39512-3a6a-4e4c-a2a2-a13ad13b11f0/rabbitmq/0.log" Dec 11 10:01:44 crc kubenswrapper[4881]: I1211 10:01:44.064801 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d52ebbc7-03f0-4f73-827b-8f8066e83146/rabbitmq/0.log" Dec 11 10:01:44 crc kubenswrapper[4881]: I1211 10:01:44.125869 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d52ebbc7-03f0-4f73-827b-8f8066e83146/setup-container/0.log" Dec 11 10:01:44 crc kubenswrapper[4881]: I1211 10:01:44.222625 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6_55357646-b980-4023-b886-5365ec6fd85f/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:44 crc kubenswrapper[4881]: I1211 10:01:44.336329 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-bfzwp_5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:44 crc kubenswrapper[4881]: I1211 10:01:44.728353 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn_cde02f48-eb61-4053-b321-3ab152bafeaa/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:44 crc kubenswrapper[4881]: I1211 10:01:44.944987 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-xjxv2_0b72500e-98a3-4e2a-895b-422da6f81a8c/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:45 crc kubenswrapper[4881]: I1211 10:01:45.067223 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-f79qk_1e3366c8-0354-47fb-af1a-f579ed757f2b/ssh-known-hosts-edpm-deployment/0.log" Dec 11 10:01:45 crc kubenswrapper[4881]: I1211 10:01:45.376548 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5f6c547b6c-rjk9h_910014af-7b9e-49b8-99e3-b80a15d72faf/proxy-server/0.log" Dec 11 10:01:45 crc kubenswrapper[4881]: I1211 10:01:45.430870 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-qt8lj_d40e3cbd-c017-4b42-94ee-dea2565d55a3/swift-ring-rebalance/0.log" Dec 11 10:01:45 crc kubenswrapper[4881]: I1211 10:01:45.486392 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5f6c547b6c-rjk9h_910014af-7b9e-49b8-99e3-b80a15d72faf/proxy-httpd/0.log" Dec 11 10:01:45 crc 
kubenswrapper[4881]: I1211 10:01:45.653202 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/account-auditor/0.log" Dec 11 10:01:45 crc kubenswrapper[4881]: I1211 10:01:45.688177 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/account-reaper/0.log" Dec 11 10:01:45 crc kubenswrapper[4881]: I1211 10:01:45.801405 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/account-replicator/0.log" Dec 11 10:01:45 crc kubenswrapper[4881]: I1211 10:01:45.898323 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/account-server/0.log" Dec 11 10:01:45 crc kubenswrapper[4881]: I1211 10:01:45.952805 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/container-auditor/0.log" Dec 11 10:01:45 crc kubenswrapper[4881]: I1211 10:01:45.959307 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/container-replicator/0.log" Dec 11 10:01:46 crc kubenswrapper[4881]: I1211 10:01:46.067386 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/container-server/0.log" Dec 11 10:01:46 crc kubenswrapper[4881]: I1211 10:01:46.110695 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/container-updater/0.log" Dec 11 10:01:46 crc kubenswrapper[4881]: I1211 10:01:46.269993 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/object-auditor/0.log" Dec 11 10:01:46 crc kubenswrapper[4881]: I1211 10:01:46.291564 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/object-expirer/0.log" Dec 11 10:01:46 crc kubenswrapper[4881]: I1211 10:01:46.388967 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/object-server/0.log" Dec 11 10:01:46 crc kubenswrapper[4881]: I1211 10:01:46.390583 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/object-replicator/0.log" Dec 11 10:01:46 crc kubenswrapper[4881]: I1211 10:01:46.532827 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/rsync/0.log" Dec 11 10:01:46 crc kubenswrapper[4881]: I1211 10:01:46.551914 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/object-updater/0.log" Dec 11 10:01:46 crc kubenswrapper[4881]: I1211 10:01:46.683669 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/swift-recon-cron/0.log" Dec 11 10:01:46 crc kubenswrapper[4881]: I1211 10:01:46.801993 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-plpct_e87175a4-03cc-472f-90ac-18cb8573131f/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:46 crc kubenswrapper[4881]: I1211 10:01:46.959910 4881 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s_1272adc3-399f-4c39-b62c-3bc18dda3b59/telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:47 crc kubenswrapper[4881]: I1211 10:01:47.190221 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_443704cb-4132-4086-9c08-edc325a2bbc5/test-operator-logs-container/0.log" Dec 11 10:01:47 crc kubenswrapper[4881]: I1211 10:01:47.288408 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq_f4e51bed-808f-4037-b472-88fbe64bd15f/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:01:48 crc kubenswrapper[4881]: I1211 10:01:48.179984 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_44483fe0-748e-4e0e-9591-f5c14c4cd3f8/tempest-tests-tempest-tests-runner/0.log" Dec 11 10:01:55 crc kubenswrapper[4881]: I1211 10:01:55.625622 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_06499ad1-4a0e-46e2-b0fa-7583b8958148/memcached/0.log" Dec 11 10:01:59 crc kubenswrapper[4881]: I1211 10:01:59.396920 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:01:59 crc kubenswrapper[4881]: I1211 10:01:59.397529 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:02:13 crc kubenswrapper[4881]: I1211 10:02:13.744459 4881 scope.go:117] "RemoveContainer" containerID="57ad29d4a3a8ddcce780a75f4ecd53cc7364a7934caf6a301ecbeeaaae14faa2" Dec 11 10:02:13 crc kubenswrapper[4881]: I1211 10:02:13.776678 4881 scope.go:117] "RemoveContainer" containerID="e4b6484e342529487175a3602f5b5b77376670a504999a2c2df993647dcccf2a" Dec 11 10:02:13 crc kubenswrapper[4881]: I1211 10:02:13.839883 4881 scope.go:117] "RemoveContainer" containerID="fc1505e1a3593fd8fee9c8301d185abe701d815b141f156c4213215cc0c67656" Dec 11 10:02:18 crc kubenswrapper[4881]: I1211 10:02:18.102604 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5_63d42923-a2ed-4b92-82ab-e9ca4ad98e55/util/0.log" Dec 11 10:02:18 crc kubenswrapper[4881]: I1211 10:02:18.327904 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5_63d42923-a2ed-4b92-82ab-e9ca4ad98e55/pull/0.log" Dec 11 10:02:18 crc kubenswrapper[4881]: I1211 10:02:18.370013 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5_63d42923-a2ed-4b92-82ab-e9ca4ad98e55/util/0.log" Dec 11 10:02:18 crc kubenswrapper[4881]: I1211 10:02:18.391928 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5_63d42923-a2ed-4b92-82ab-e9ca4ad98e55/pull/0.log" Dec 11 10:02:18 crc 
kubenswrapper[4881]: I1211 10:02:18.594693 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5_63d42923-a2ed-4b92-82ab-e9ca4ad98e55/extract/0.log" Dec 11 10:02:18 crc kubenswrapper[4881]: I1211 10:02:18.597498 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5_63d42923-a2ed-4b92-82ab-e9ca4ad98e55/pull/0.log" Dec 11 10:02:18 crc kubenswrapper[4881]: I1211 10:02:18.644997 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5_63d42923-a2ed-4b92-82ab-e9ca4ad98e55/util/0.log" Dec 11 10:02:18 crc kubenswrapper[4881]: I1211 10:02:18.827228 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5bfbbb859d-d4dsr_e5f3ecaa-a91f-4dc8-9baf-2866cf8df0f4/kube-rbac-proxy/0.log" Dec 11 10:02:18 crc kubenswrapper[4881]: I1211 10:02:18.843238 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-lzm95_80435f25-efd8-482d-9a9b-1c6caafd655e/kube-rbac-proxy/0.log" Dec 11 10:02:18 crc kubenswrapper[4881]: I1211 10:02:18.896806 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5bfbbb859d-d4dsr_e5f3ecaa-a91f-4dc8-9baf-2866cf8df0f4/manager/0.log" Dec 11 10:02:19 crc kubenswrapper[4881]: I1211 10:02:19.070923 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-6gtkq_e07ec193-8583-4299-9370-ce788e2e1ae1/kube-rbac-proxy/0.log" Dec 11 10:02:19 crc kubenswrapper[4881]: I1211 10:02:19.117235 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-lzm95_80435f25-efd8-482d-9a9b-1c6caafd655e/manager/0.log" Dec 11 10:02:19 crc kubenswrapper[4881]: I1211 10:02:19.188488 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-6gtkq_e07ec193-8583-4299-9370-ce788e2e1ae1/manager/0.log" Dec 11 10:02:19 crc kubenswrapper[4881]: I1211 10:02:19.605736 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-85fbd69fcd-twbpm_d46284f5-997e-4ce9-a607-254c3ce33f31/kube-rbac-proxy/0.log" Dec 11 10:02:19 crc kubenswrapper[4881]: I1211 10:02:19.645448 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-85fbd69fcd-twbpm_d46284f5-997e-4ce9-a607-254c3ce33f31/manager/0.log" Dec 11 10:02:19 crc kubenswrapper[4881]: I1211 10:02:19.864988 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-rs5fd_d4850972-2a52-4030-9822-af3de9cc647a/kube-rbac-proxy/0.log" Dec 11 10:02:19 crc kubenswrapper[4881]: I1211 10:02:19.947536 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-rs5fd_d4850972-2a52-4030-9822-af3de9cc647a/manager/0.log" Dec 11 10:02:20 crc kubenswrapper[4881]: I1211 10:02:20.048673 4881 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-dqhqx_2dab1d4f-2c9a-4b32-a666-4b0802e51576/kube-rbac-proxy/0.log" Dec 11 10:02:20 crc kubenswrapper[4881]: I1211 10:02:20.196459 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-dqhqx_2dab1d4f-2c9a-4b32-a666-4b0802e51576/manager/0.log" Dec 11 10:02:20 crc kubenswrapper[4881]: I1211 10:02:20.272838 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6c55d8d69b-44khn_867bc48e-c043-4428-b201-0ce4dd830f3f/kube-rbac-proxy/0.log" Dec 11 10:02:20 crc kubenswrapper[4881]: I1211 10:02:20.429041 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6c55d8d69b-44khn_867bc48e-c043-4428-b201-0ce4dd830f3f/manager/0.log" Dec 11 10:02:20 crc kubenswrapper[4881]: I1211 10:02:20.484618 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-mvlsx_2fd323b1-8fa8-456c-bcd8-d89872682762/kube-rbac-proxy/0.log" Dec 11 10:02:20 crc kubenswrapper[4881]: I1211 10:02:20.514422 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-mvlsx_2fd323b1-8fa8-456c-bcd8-d89872682762/manager/0.log" Dec 11 10:02:20 crc kubenswrapper[4881]: I1211 10:02:20.726762 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-79cc9d59f5-qh8s8_9b3ee431-6c33-4b49-8fdb-27056597fbe8/kube-rbac-proxy/0.log" Dec 11 10:02:20 crc kubenswrapper[4881]: I1211 10:02:20.845963 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-79cc9d59f5-qh8s8_9b3ee431-6c33-4b49-8fdb-27056597fbe8/manager/0.log" Dec 11 10:02:20 crc kubenswrapper[4881]: I1211 10:02:20.867575 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5cbc8c7f96-ww55g_da8fe0e3-3416-453d-80b7-47d4ab23c610/kube-rbac-proxy/0.log" Dec 11 10:02:20 crc kubenswrapper[4881]: I1211 10:02:20.931831 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5cbc8c7f96-ww55g_da8fe0e3-3416-453d-80b7-47d4ab23c610/manager/0.log" Dec 11 10:02:21 crc kubenswrapper[4881]: I1211 10:02:21.067916 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-qhz8n_21cbbd1f-7cfe-481a-b02a-f72c9d052519/manager/0.log" Dec 11 10:02:21 crc kubenswrapper[4881]: I1211 10:02:21.081538 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-qhz8n_21cbbd1f-7cfe-481a-b02a-f72c9d052519/kube-rbac-proxy/0.log" Dec 11 10:02:21 crc kubenswrapper[4881]: I1211 10:02:21.328527 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-58879495c-kmpzx_5079b14d-bd2f-4151-898d-91362a4b24c2/kube-rbac-proxy/0.log" Dec 11 10:02:21 crc kubenswrapper[4881]: I1211 10:02:21.574898 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-58879495c-kmpzx_5079b14d-bd2f-4151-898d-91362a4b24c2/manager/0.log" Dec 11 10:02:21 crc kubenswrapper[4881]: I1211 10:02:21.815686 4881 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-2dxhd_05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1/kube-rbac-proxy/0.log" Dec 11 10:02:21 crc kubenswrapper[4881]: I1211 10:02:21.837876 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-2dxhd_05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1/manager/0.log" Dec 11 10:02:21 crc kubenswrapper[4881]: I1211 10:02:21.927394 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-d5fb87cb8-q22gz_ac8a74d8-c81e-4154-b2dc-7ebb23d13aa7/kube-rbac-proxy/0.log" Dec 11 10:02:22 crc kubenswrapper[4881]: I1211 10:02:22.068096 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-d5fb87cb8-q22gz_ac8a74d8-c81e-4154-b2dc-7ebb23d13aa7/manager/0.log" Dec 11 10:02:22 crc kubenswrapper[4881]: I1211 10:02:22.160659 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-kgwhp_2621fa0b-89fb-4d65-aef3-98de0e9a8106/kube-rbac-proxy/0.log" Dec 11 10:02:22 crc kubenswrapper[4881]: I1211 10:02:22.180781 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-kgwhp_2621fa0b-89fb-4d65-aef3-98de0e9a8106/manager/0.log" Dec 11 10:02:22 crc kubenswrapper[4881]: I1211 10:02:22.468457 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-b95f4d4f8-phlkr_128ea8d0-53b4-410c-8587-165aa960d46c/kube-rbac-proxy/0.log" Dec 11 10:02:22 crc kubenswrapper[4881]: I1211 10:02:22.779718 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-768c6dc6d6-plgdc_36714c9d-bfd8-4c2e-9d06-971da594217f/kube-rbac-proxy/0.log" Dec 11 10:02:22 crc kubenswrapper[4881]: I1211 10:02:22.913870 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-768c6dc6d6-plgdc_36714c9d-bfd8-4c2e-9d06-971da594217f/operator/0.log" Dec 11 10:02:22 crc kubenswrapper[4881]: I1211 10:02:22.964610 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-pgrfm_c8209c30-f66b-4b47-a663-a0dac2ea36dd/registry-server/0.log" Dec 11 10:02:23 crc kubenswrapper[4881]: I1211 10:02:23.159931 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-nnlz4_91ca18da-4852-496b-bf77-558e8010aabe/kube-rbac-proxy/0.log" Dec 11 10:02:23 crc kubenswrapper[4881]: I1211 10:02:23.349065 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-nnlz4_91ca18da-4852-496b-bf77-558e8010aabe/manager/0.log" Dec 11 10:02:23 crc kubenswrapper[4881]: I1211 10:02:23.385987 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-ftqqr_02aa2201-6757-40f8-b24d-fbad39b79069/kube-rbac-proxy/0.log" Dec 11 10:02:23 crc kubenswrapper[4881]: I1211 10:02:23.467485 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-ftqqr_02aa2201-6757-40f8-b24d-fbad39b79069/manager/0.log" Dec 11 10:02:23 crc kubenswrapper[4881]: 
I1211 10:02:23.752876 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-22zs6_14d65f13-7dce-49b7-9c8e-0a6ea9b57132/operator/0.log"
Dec 11 10:02:23 crc kubenswrapper[4881]: I1211 10:02:23.845735 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-b95f4d4f8-phlkr_128ea8d0-53b4-410c-8587-165aa960d46c/manager/0.log"
Dec 11 10:02:23 crc kubenswrapper[4881]: I1211 10:02:23.889087 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-8f6687c44-zkl8b_246f8ac7-b65e-40b1-aba1-ba1defde43ef/kube-rbac-proxy/0.log"
Dec 11 10:02:23 crc kubenswrapper[4881]: I1211 10:02:23.900814 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-8f6687c44-zkl8b_246f8ac7-b65e-40b1-aba1-ba1defde43ef/manager/0.log"
Dec 11 10:02:24 crc kubenswrapper[4881]: I1211 10:02:24.010313 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-fb56f4744-vgmrx_de3e8077-0bfa-4e55-aba0-0e5dca0e598d/kube-rbac-proxy/0.log"
Dec 11 10:02:24 crc kubenswrapper[4881]: I1211 10:02:24.209457 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-bb86466d8-6mz9j_06cd21f3-b69e-4238-9894-8c4f0f77ee53/kube-rbac-proxy/0.log"
Dec 11 10:02:24 crc kubenswrapper[4881]: I1211 10:02:24.257261 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-bb86466d8-6mz9j_06cd21f3-b69e-4238-9894-8c4f0f77ee53/manager/0.log"
Dec 11 10:02:24 crc kubenswrapper[4881]: I1211 10:02:24.386126 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-fb56f4744-vgmrx_de3e8077-0bfa-4e55-aba0-0e5dca0e598d/manager/0.log"
Dec 11 10:02:24 crc kubenswrapper[4881]: I1211 10:02:24.418269 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-mj8lx_dd07d5b4-cfe3-4580-a859-64558daab601/kube-rbac-proxy/0.log"
Dec 11 10:02:24 crc kubenswrapper[4881]: I1211 10:02:24.496350 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-mj8lx_dd07d5b4-cfe3-4580-a859-64558daab601/manager/0.log"
Dec 11 10:02:29 crc kubenswrapper[4881]: I1211 10:02:29.396872 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 10:02:29 crc kubenswrapper[4881]: I1211 10:02:29.397190 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 10:02:29 crc kubenswrapper[4881]: I1211 10:02:29.397241 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh"
Dec 11 10:02:29 crc kubenswrapper[4881]: I1211 10:02:29.398184 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"82dddf54ebc8157bc717b15196b143b5bf6d65e5f3b7b89542d401dd508acd69"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 11 10:02:29 crc kubenswrapper[4881]: I1211 10:02:29.400134 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://82dddf54ebc8157bc717b15196b143b5bf6d65e5f3b7b89542d401dd508acd69" gracePeriod=600
Dec 11 10:02:30 crc kubenswrapper[4881]: I1211 10:02:30.089610 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="82dddf54ebc8157bc717b15196b143b5bf6d65e5f3b7b89542d401dd508acd69" exitCode=0
Dec 11 10:02:30 crc kubenswrapper[4881]: I1211 10:02:30.089669 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"82dddf54ebc8157bc717b15196b143b5bf6d65e5f3b7b89542d401dd508acd69"}
Dec 11 10:02:30 crc kubenswrapper[4881]: I1211 10:02:30.090515 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"}
Dec 11 10:02:30 crc kubenswrapper[4881]: I1211 10:02:30.090606 4881 scope.go:117] "RemoveContainer" containerID="50e3b73ab215efbdf949798a8eb564f0477f7755099fe663a304e614a2b9881c"
Dec 11 10:02:44 crc kubenswrapper[4881]: I1211 10:02:44.727932 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-dw769_a658d244-5927-4518-b8bb-0685d0e40a07/control-plane-machine-set-operator/0.log"
Dec 11 10:02:45 crc kubenswrapper[4881]: I1211 10:02:45.221955 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-vk42n_70bb46be-cfc1-411d-9a3b-55e040e1c2c5/kube-rbac-proxy/0.log"
Dec 11 10:02:45 crc kubenswrapper[4881]: I1211 10:02:45.369941 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-vk42n_70bb46be-cfc1-411d-9a3b-55e040e1c2c5/machine-api-operator/0.log"
Dec 11 10:02:59 crc kubenswrapper[4881]: I1211 10:02:59.880820 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-9wlsj_39798ebd-f640-4134-881a-1fc8aae8caf2/cert-manager-controller/0.log"
Dec 11 10:03:00 crc kubenswrapper[4881]: I1211 10:03:00.090920 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-k8mbh_b5ad6193-6ecc-4146-ba6a-b16704219c0b/cert-manager-webhook/0.log"
Dec 11 10:03:00 crc kubenswrapper[4881]: I1211 10:03:00.124570 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-m6cfc_e18d3098-b003-42c6-bba8-0fdeff9222d4/cert-manager-cainjector/0.log"
Dec 11 10:03:13 crc kubenswrapper[4881]: I1211 10:03:13.907312 4881 log.go:25] "Finished parsing log file"
path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-6ff7998486-nj8ls_809482cd-c05d-41df-96db-84149e666743/nmstate-console-plugin/0.log" Dec 11 10:03:14 crc kubenswrapper[4881]: I1211 10:03:14.148637 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-qx6gc_3ef011ad-0329-41b6-89d0-bbe3c976576b/nmstate-handler/0.log" Dec 11 10:03:14 crc kubenswrapper[4881]: I1211 10:03:14.186127 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f7f7578db-m5zx5_74e6048b-0d2d-418c-907c-5858077de213/kube-rbac-proxy/0.log" Dec 11 10:03:14 crc kubenswrapper[4881]: I1211 10:03:14.215376 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f7f7578db-m5zx5_74e6048b-0d2d-418c-907c-5858077de213/nmstate-metrics/0.log" Dec 11 10:03:14 crc kubenswrapper[4881]: I1211 10:03:14.475480 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-6769fb99d-g9bf9_ae9ad369-6e2e-4c6c-a12a-cf228edaa48c/nmstate-operator/0.log" Dec 11 10:03:14 crc kubenswrapper[4881]: I1211 10:03:14.532848 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-f8fb84555-mfvb7_dbbccd2c-ccc0-4501-b4b4-b85621051f5f/nmstate-webhook/0.log" Dec 11 10:03:28 crc kubenswrapper[4881]: I1211 10:03:28.954902 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5895ddbf9f-qxpgx_71e1a3d0-ed67-45c6-8bfd-95237910c5c9/manager/0.log" Dec 11 10:03:28 crc kubenswrapper[4881]: I1211 10:03:28.979493 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5895ddbf9f-qxpgx_71e1a3d0-ed67-45c6-8bfd-95237910c5c9/kube-rbac-proxy/0.log" Dec 11 10:03:44 crc kubenswrapper[4881]: I1211 10:03:44.496118 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_cluster-logging-operator-ff9846bd-w8lh7_e9a4de57-461a-4db3-b12e-5d9eb9fd0a60/cluster-logging-operator/0.log" Dec 11 10:03:44 crc kubenswrapper[4881]: I1211 10:03:44.735231 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_collector-bwkfh_c095e07a-478b-41db-8ca3-a6b29c79756c/collector/0.log" Dec 11 10:03:44 crc kubenswrapper[4881]: I1211 10:03:44.791124 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-compactor-0_19c28f0c-c6b1-4192-b769-35ce88232323/loki-compactor/0.log" Dec 11 10:03:44 crc kubenswrapper[4881]: I1211 10:03:44.955905 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-distributor-76cc67bf56-7wtq5_3d172162-6309-4035-b574-842fa40d6db6/loki-distributor/0.log" Dec 11 10:03:45 crc kubenswrapper[4881]: I1211 10:03:45.044735 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-5f65744c89-srnq9_3470b561-e428-417b-bd76-92642ba561d8/gateway/0.log" Dec 11 10:03:45 crc kubenswrapper[4881]: I1211 10:03:45.070008 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-5f65744c89-srnq9_3470b561-e428-417b-bd76-92642ba561d8/opa/0.log" Dec 11 10:03:45 crc kubenswrapper[4881]: I1211 10:03:45.218494 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-5f65744c89-tvqmf_166ecd73-e9b9-4aa0-b09c-7ad373aea239/gateway/0.log" Dec 11 10:03:45 crc kubenswrapper[4881]: I1211 10:03:45.255134 
4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-5f65744c89-tvqmf_166ecd73-e9b9-4aa0-b09c-7ad373aea239/opa/0.log" Dec 11 10:03:45 crc kubenswrapper[4881]: I1211 10:03:45.443714 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-index-gateway-0_7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b/loki-index-gateway/0.log" Dec 11 10:03:45 crc kubenswrapper[4881]: I1211 10:03:45.508940 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-ingester-0_76043698-27ae-4a6d-af81-a7da0a14902d/loki-ingester/0.log" Dec 11 10:03:45 crc kubenswrapper[4881]: I1211 10:03:45.659135 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-querier-5895d59bb8-87hfb_b4ff84df-3a3b-4346-84a2-56f79c1aac44/loki-querier/0.log" Dec 11 10:03:45 crc kubenswrapper[4881]: I1211 10:03:45.725014 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-query-frontend-84558f7c9f-sswft_b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3/loki-query-frontend/0.log" Dec 11 10:04:02 crc kubenswrapper[4881]: I1211 10:04:02.358818 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5bddd4b946-mzlq2_1f002df3-6d4a-4f05-8ef8-07bc16590076/kube-rbac-proxy/0.log" Dec 11 10:04:02 crc kubenswrapper[4881]: I1211 10:04:02.493317 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5bddd4b946-mzlq2_1f002df3-6d4a-4f05-8ef8-07bc16590076/controller/0.log" Dec 11 10:04:02 crc kubenswrapper[4881]: I1211 10:04:02.678392 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7784b6fcf-nkdkx_52d03d01-bd10-4a71-993f-284fa256ebae/frr-k8s-webhook-server/0.log" Dec 11 10:04:02 crc kubenswrapper[4881]: I1211 10:04:02.757884 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-frr-files/0.log" Dec 11 10:04:03 crc kubenswrapper[4881]: I1211 10:04:03.039656 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-frr-files/0.log" Dec 11 10:04:03 crc kubenswrapper[4881]: I1211 10:04:03.065412 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-reloader/0.log" Dec 11 10:04:03 crc kubenswrapper[4881]: I1211 10:04:03.100470 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-metrics/0.log" Dec 11 10:04:03 crc kubenswrapper[4881]: I1211 10:04:03.144931 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-reloader/0.log" Dec 11 10:04:03 crc kubenswrapper[4881]: I1211 10:04:03.353559 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-frr-files/0.log" Dec 11 10:04:03 crc kubenswrapper[4881]: I1211 10:04:03.407354 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-reloader/0.log" Dec 11 10:04:03 crc kubenswrapper[4881]: I1211 10:04:03.444997 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-metrics/0.log" Dec 11 10:04:03 crc 
kubenswrapper[4881]: I1211 10:04:03.489485 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-metrics/0.log" Dec 11 10:04:03 crc kubenswrapper[4881]: I1211 10:04:03.685745 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-frr-files/0.log" Dec 11 10:04:03 crc kubenswrapper[4881]: I1211 10:04:03.691898 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-reloader/0.log" Dec 11 10:04:03 crc kubenswrapper[4881]: I1211 10:04:03.720144 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/controller/0.log" Dec 11 10:04:03 crc kubenswrapper[4881]: I1211 10:04:03.776650 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-metrics/0.log" Dec 11 10:04:03 crc kubenswrapper[4881]: I1211 10:04:03.896678 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/frr-metrics/0.log" Dec 11 10:04:04 crc kubenswrapper[4881]: I1211 10:04:04.037604 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/kube-rbac-proxy/0.log" Dec 11 10:04:04 crc kubenswrapper[4881]: I1211 10:04:04.120565 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/kube-rbac-proxy-frr/0.log" Dec 11 10:04:04 crc kubenswrapper[4881]: I1211 10:04:04.176711 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/reloader/0.log" Dec 11 10:04:04 crc kubenswrapper[4881]: I1211 10:04:04.385076 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5646b5c6f5-clxl4_fda9a059-2ee6-41ae-ad81-e4f694080990/manager/0.log" Dec 11 10:04:04 crc kubenswrapper[4881]: I1211 10:04:04.580123 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-65f54b9948-tf47z_56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04/webhook-server/0.log" Dec 11 10:04:04 crc kubenswrapper[4881]: I1211 10:04:04.731034 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-x559l_3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3/kube-rbac-proxy/0.log" Dec 11 10:04:05 crc kubenswrapper[4881]: I1211 10:04:05.460425 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-x559l_3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3/speaker/0.log" Dec 11 10:04:06 crc kubenswrapper[4881]: I1211 10:04:06.192642 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/frr/0.log" Dec 11 10:04:19 crc kubenswrapper[4881]: I1211 10:04:19.637309 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr_e703c073-e40b-4293-9138-648e3d24c648/util/0.log" Dec 11 10:04:19 crc kubenswrapper[4881]: I1211 10:04:19.926430 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr_e703c073-e40b-4293-9138-648e3d24c648/pull/0.log" Dec 11 
10:04:19 crc kubenswrapper[4881]: I1211 10:04:19.995422 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr_e703c073-e40b-4293-9138-648e3d24c648/pull/0.log" Dec 11 10:04:20 crc kubenswrapper[4881]: I1211 10:04:20.006805 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr_e703c073-e40b-4293-9138-648e3d24c648/util/0.log" Dec 11 10:04:20 crc kubenswrapper[4881]: I1211 10:04:20.205064 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr_e703c073-e40b-4293-9138-648e3d24c648/util/0.log" Dec 11 10:04:20 crc kubenswrapper[4881]: I1211 10:04:20.248192 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr_e703c073-e40b-4293-9138-648e3d24c648/pull/0.log" Dec 11 10:04:20 crc kubenswrapper[4881]: I1211 10:04:20.249247 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr_e703c073-e40b-4293-9138-648e3d24c648/extract/0.log" Dec 11 10:04:20 crc kubenswrapper[4881]: I1211 10:04:20.437614 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7_8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e/util/0.log" Dec 11 10:04:20 crc kubenswrapper[4881]: I1211 10:04:20.635007 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7_8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e/util/0.log" Dec 11 10:04:20 crc kubenswrapper[4881]: I1211 10:04:20.640833 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7_8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e/pull/0.log" Dec 11 10:04:20 crc kubenswrapper[4881]: I1211 10:04:20.647685 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7_8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e/pull/0.log" Dec 11 10:04:20 crc kubenswrapper[4881]: I1211 10:04:20.885072 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7_8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e/util/0.log" Dec 11 10:04:20 crc kubenswrapper[4881]: I1211 10:04:20.899007 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7_8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e/pull/0.log" Dec 11 10:04:20 crc kubenswrapper[4881]: I1211 10:04:20.924083 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7_8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e/extract/0.log" Dec 11 10:04:21 crc kubenswrapper[4881]: I1211 10:04:21.106179 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk_61c61666-4cf4-40df-bc54-202ef93cf87d/util/0.log" Dec 11 10:04:21 crc kubenswrapper[4881]: I1211 10:04:21.287702 4881 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk_61c61666-4cf4-40df-bc54-202ef93cf87d/util/0.log" Dec 11 10:04:21 crc kubenswrapper[4881]: I1211 10:04:21.476006 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk_61c61666-4cf4-40df-bc54-202ef93cf87d/pull/0.log" Dec 11 10:04:21 crc kubenswrapper[4881]: I1211 10:04:21.476089 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk_61c61666-4cf4-40df-bc54-202ef93cf87d/pull/0.log" Dec 11 10:04:21 crc kubenswrapper[4881]: I1211 10:04:21.477242 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk_61c61666-4cf4-40df-bc54-202ef93cf87d/pull/0.log" Dec 11 10:04:21 crc kubenswrapper[4881]: I1211 10:04:21.646262 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk_61c61666-4cf4-40df-bc54-202ef93cf87d/util/0.log" Dec 11 10:04:21 crc kubenswrapper[4881]: I1211 10:04:21.708545 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf_6d84ce93-6f0f-4248-a431-dd1692ff2ba8/util/0.log" Dec 11 10:04:21 crc kubenswrapper[4881]: I1211 10:04:21.771230 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk_61c61666-4cf4-40df-bc54-202ef93cf87d/extract/0.log" Dec 11 10:04:22 crc kubenswrapper[4881]: I1211 10:04:22.168456 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf_6d84ce93-6f0f-4248-a431-dd1692ff2ba8/pull/0.log" Dec 11 10:04:22 crc kubenswrapper[4881]: I1211 10:04:22.192251 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf_6d84ce93-6f0f-4248-a431-dd1692ff2ba8/pull/0.log" Dec 11 10:04:22 crc kubenswrapper[4881]: I1211 10:04:22.204840 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf_6d84ce93-6f0f-4248-a431-dd1692ff2ba8/util/0.log" Dec 11 10:04:22 crc kubenswrapper[4881]: I1211 10:04:22.405133 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf_6d84ce93-6f0f-4248-a431-dd1692ff2ba8/pull/0.log" Dec 11 10:04:22 crc kubenswrapper[4881]: I1211 10:04:22.419666 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf_6d84ce93-6f0f-4248-a431-dd1692ff2ba8/extract/0.log" Dec 11 10:04:22 crc kubenswrapper[4881]: I1211 10:04:22.425025 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf_6d84ce93-6f0f-4248-a431-dd1692ff2ba8/util/0.log" Dec 11 10:04:22 crc kubenswrapper[4881]: I1211 10:04:22.599295 4881 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng_b1985fae-8a44-4865-b0bc-7d9e8d197a02/util/0.log" Dec 11 10:04:22 crc kubenswrapper[4881]: I1211 10:04:22.737194 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng_b1985fae-8a44-4865-b0bc-7d9e8d197a02/util/0.log" Dec 11 10:04:22 crc kubenswrapper[4881]: I1211 10:04:22.764554 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng_b1985fae-8a44-4865-b0bc-7d9e8d197a02/pull/0.log" Dec 11 10:04:22 crc kubenswrapper[4881]: I1211 10:04:22.808614 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng_b1985fae-8a44-4865-b0bc-7d9e8d197a02/pull/0.log" Dec 11 10:04:22 crc kubenswrapper[4881]: I1211 10:04:22.953417 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng_b1985fae-8a44-4865-b0bc-7d9e8d197a02/pull/0.log" Dec 11 10:04:22 crc kubenswrapper[4881]: I1211 10:04:22.996129 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng_b1985fae-8a44-4865-b0bc-7d9e8d197a02/util/0.log" Dec 11 10:04:23 crc kubenswrapper[4881]: I1211 10:04:23.021013 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng_b1985fae-8a44-4865-b0bc-7d9e8d197a02/extract/0.log" Dec 11 10:04:23 crc kubenswrapper[4881]: I1211 10:04:23.193939 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qnffx_18611892-d199-4d6c-a3b6-391c8c78511c/extract-utilities/0.log" Dec 11 10:04:23 crc kubenswrapper[4881]: I1211 10:04:23.345373 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qnffx_18611892-d199-4d6c-a3b6-391c8c78511c/extract-utilities/0.log" Dec 11 10:04:23 crc kubenswrapper[4881]: I1211 10:04:23.347801 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qnffx_18611892-d199-4d6c-a3b6-391c8c78511c/extract-content/0.log" Dec 11 10:04:23 crc kubenswrapper[4881]: I1211 10:04:23.406890 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qnffx_18611892-d199-4d6c-a3b6-391c8c78511c/extract-content/0.log" Dec 11 10:04:23 crc kubenswrapper[4881]: I1211 10:04:23.557989 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qnffx_18611892-d199-4d6c-a3b6-391c8c78511c/extract-utilities/0.log" Dec 11 10:04:23 crc kubenswrapper[4881]: I1211 10:04:23.583925 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qnffx_18611892-d199-4d6c-a3b6-391c8c78511c/extract-content/0.log" Dec 11 10:04:23 crc kubenswrapper[4881]: I1211 10:04:23.624918 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-74d77_99c7c976-c996-4b9c-a389-fbaed3f71813/extract-utilities/0.log" Dec 11 10:04:23 crc kubenswrapper[4881]: I1211 10:04:23.798111 4881 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-qnffx_18611892-d199-4d6c-a3b6-391c8c78511c/registry-server/0.log" Dec 11 10:04:23 crc kubenswrapper[4881]: I1211 10:04:23.924071 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-74d77_99c7c976-c996-4b9c-a389-fbaed3f71813/extract-content/0.log" Dec 11 10:04:23 crc kubenswrapper[4881]: I1211 10:04:23.935233 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-74d77_99c7c976-c996-4b9c-a389-fbaed3f71813/extract-utilities/0.log" Dec 11 10:04:23 crc kubenswrapper[4881]: I1211 10:04:23.936567 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-74d77_99c7c976-c996-4b9c-a389-fbaed3f71813/extract-content/0.log" Dec 11 10:04:24 crc kubenswrapper[4881]: I1211 10:04:24.142543 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-74d77_99c7c976-c996-4b9c-a389-fbaed3f71813/extract-utilities/0.log" Dec 11 10:04:24 crc kubenswrapper[4881]: I1211 10:04:24.174162 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-74d77_99c7c976-c996-4b9c-a389-fbaed3f71813/extract-content/0.log" Dec 11 10:04:24 crc kubenswrapper[4881]: I1211 10:04:24.215155 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-tbhw6_a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf/marketplace-operator/1.log" Dec 11 10:04:24 crc kubenswrapper[4881]: I1211 10:04:24.416467 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-tbhw6_a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf/marketplace-operator/0.log" Dec 11 10:04:24 crc kubenswrapper[4881]: I1211 10:04:24.477196 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j9mc4_e1b26f03-480c-45ab-b37e-c2971f8e117a/extract-utilities/0.log" Dec 11 10:04:24 crc kubenswrapper[4881]: I1211 10:04:24.677935 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j9mc4_e1b26f03-480c-45ab-b37e-c2971f8e117a/extract-utilities/0.log" Dec 11 10:04:24 crc kubenswrapper[4881]: I1211 10:04:24.774435 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j9mc4_e1b26f03-480c-45ab-b37e-c2971f8e117a/extract-content/0.log" Dec 11 10:04:24 crc kubenswrapper[4881]: I1211 10:04:24.835064 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j9mc4_e1b26f03-480c-45ab-b37e-c2971f8e117a/extract-content/0.log" Dec 11 10:04:24 crc kubenswrapper[4881]: I1211 10:04:24.996039 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j9mc4_e1b26f03-480c-45ab-b37e-c2971f8e117a/extract-utilities/0.log" Dec 11 10:04:25 crc kubenswrapper[4881]: I1211 10:04:25.049723 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j9mc4_e1b26f03-480c-45ab-b37e-c2971f8e117a/extract-content/0.log" Dec 11 10:04:25 crc kubenswrapper[4881]: I1211 10:04:25.166042 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-74d77_99c7c976-c996-4b9c-a389-fbaed3f71813/registry-server/0.log" Dec 11 10:04:25 crc kubenswrapper[4881]: I1211 10:04:25.228583 4881 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openshift-marketplace_redhat-operators-kfk9x_23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f/extract-utilities/0.log" Dec 11 10:04:25 crc kubenswrapper[4881]: I1211 10:04:25.327538 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j9mc4_e1b26f03-480c-45ab-b37e-c2971f8e117a/registry-server/0.log" Dec 11 10:04:25 crc kubenswrapper[4881]: I1211 10:04:25.449994 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kfk9x_23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f/extract-utilities/0.log" Dec 11 10:04:25 crc kubenswrapper[4881]: I1211 10:04:25.456767 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kfk9x_23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f/extract-content/0.log" Dec 11 10:04:25 crc kubenswrapper[4881]: I1211 10:04:25.461374 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kfk9x_23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f/extract-content/0.log" Dec 11 10:04:25 crc kubenswrapper[4881]: I1211 10:04:25.662261 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kfk9x_23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f/extract-utilities/0.log" Dec 11 10:04:25 crc kubenswrapper[4881]: I1211 10:04:25.673157 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kfk9x_23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f/extract-content/0.log" Dec 11 10:04:26 crc kubenswrapper[4881]: I1211 10:04:26.522692 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kfk9x_23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f/registry-server/0.log" Dec 11 10:04:29 crc kubenswrapper[4881]: I1211 10:04:29.396945 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:04:29 crc kubenswrapper[4881]: I1211 10:04:29.397528 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:04:39 crc kubenswrapper[4881]: I1211 10:04:39.795848 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-qwshf_518448cb-2c51-4398-a75a-3d2c0d26905e/prometheus-operator/0.log" Dec 11 10:04:39 crc kubenswrapper[4881]: I1211 10:04:39.835197 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ntbj9"] Dec 11 10:04:39 crc kubenswrapper[4881]: E1211 10:04:39.835940 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7f4b194-07cf-4a48-ab95-f0aece6d4576" containerName="keystone-cron" Dec 11 10:04:39 crc kubenswrapper[4881]: I1211 10:04:39.836044 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7f4b194-07cf-4a48-ab95-f0aece6d4576" containerName="keystone-cron" Dec 11 10:04:39 crc kubenswrapper[4881]: E1211 10:04:39.836115 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2d415e3-40dd-443c-a9ba-8c7aae086b1a" containerName="container-00" Dec 11 10:04:39 crc 
kubenswrapper[4881]: I1211 10:04:39.836166 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2d415e3-40dd-443c-a9ba-8c7aae086b1a" containerName="container-00" Dec 11 10:04:39 crc kubenswrapper[4881]: I1211 10:04:39.836500 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7f4b194-07cf-4a48-ab95-f0aece6d4576" containerName="keystone-cron" Dec 11 10:04:39 crc kubenswrapper[4881]: I1211 10:04:39.836592 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2d415e3-40dd-443c-a9ba-8c7aae086b1a" containerName="container-00" Dec 11 10:04:39 crc kubenswrapper[4881]: I1211 10:04:39.838707 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ntbj9" Dec 11 10:04:39 crc kubenswrapper[4881]: I1211 10:04:39.907033 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ntbj9"] Dec 11 10:04:40 crc kubenswrapper[4881]: I1211 10:04:40.007542 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-utilities\") pod \"redhat-operators-ntbj9\" (UID: \"e8067619-5ce9-4b22-a287-e65e3d6b1e0e\") " pod="openshift-marketplace/redhat-operators-ntbj9" Dec 11 10:04:40 crc kubenswrapper[4881]: I1211 10:04:40.007854 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9z6t\" (UniqueName: \"kubernetes.io/projected/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-kube-api-access-w9z6t\") pod \"redhat-operators-ntbj9\" (UID: \"e8067619-5ce9-4b22-a287-e65e3d6b1e0e\") " pod="openshift-marketplace/redhat-operators-ntbj9" Dec 11 10:04:40 crc kubenswrapper[4881]: I1211 10:04:40.007957 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-catalog-content\") pod \"redhat-operators-ntbj9\" (UID: \"e8067619-5ce9-4b22-a287-e65e3d6b1e0e\") " pod="openshift-marketplace/redhat-operators-ntbj9" Dec 11 10:04:40 crc kubenswrapper[4881]: I1211 10:04:40.110167 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-utilities\") pod \"redhat-operators-ntbj9\" (UID: \"e8067619-5ce9-4b22-a287-e65e3d6b1e0e\") " pod="openshift-marketplace/redhat-operators-ntbj9" Dec 11 10:04:40 crc kubenswrapper[4881]: I1211 10:04:40.110282 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9z6t\" (UniqueName: \"kubernetes.io/projected/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-kube-api-access-w9z6t\") pod \"redhat-operators-ntbj9\" (UID: \"e8067619-5ce9-4b22-a287-e65e3d6b1e0e\") " pod="openshift-marketplace/redhat-operators-ntbj9" Dec 11 10:04:40 crc kubenswrapper[4881]: I1211 10:04:40.110376 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-catalog-content\") pod \"redhat-operators-ntbj9\" (UID: \"e8067619-5ce9-4b22-a287-e65e3d6b1e0e\") " pod="openshift-marketplace/redhat-operators-ntbj9" Dec 11 10:04:40 crc kubenswrapper[4881]: I1211 10:04:40.111500 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-catalog-content\") pod \"redhat-operators-ntbj9\" (UID: \"e8067619-5ce9-4b22-a287-e65e3d6b1e0e\") " pod="openshift-marketplace/redhat-operators-ntbj9" Dec 11 10:04:40 crc kubenswrapper[4881]: I1211 10:04:40.111590 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-utilities\") pod \"redhat-operators-ntbj9\" (UID: \"e8067619-5ce9-4b22-a287-e65e3d6b1e0e\") " pod="openshift-marketplace/redhat-operators-ntbj9" Dec 11 10:04:40 crc kubenswrapper[4881]: I1211 10:04:40.137687 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9z6t\" (UniqueName: \"kubernetes.io/projected/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-kube-api-access-w9z6t\") pod \"redhat-operators-ntbj9\" (UID: \"e8067619-5ce9-4b22-a287-e65e3d6b1e0e\") " pod="openshift-marketplace/redhat-operators-ntbj9" Dec 11 10:04:40 crc kubenswrapper[4881]: I1211 10:04:40.164950 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ntbj9" Dec 11 10:04:40 crc kubenswrapper[4881]: I1211 10:04:40.361397 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_6e8d1991-85d6-4f79-8233-98a8c9be9b32/prometheus-operator-admission-webhook/0.log" Dec 11 10:04:40 crc kubenswrapper[4881]: I1211 10:04:40.535788 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_91763e1b-8187-4c07-be69-34a7330afb73/prometheus-operator-admission-webhook/0.log" Dec 11 10:04:40 crc kubenswrapper[4881]: I1211 10:04:40.791331 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-zdvsr_578f637c-c2d8-46be-9838-f2a0b587b0c6/operator/0.log" Dec 11 10:04:40 crc kubenswrapper[4881]: I1211 10:04:40.846496 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-7d5fb4cbfb-qvsbb_3c5fa886-8b43-4ef2-9f4b-d4724c4efa56/observability-ui-dashboards/0.log" Dec 11 10:04:40 crc kubenswrapper[4881]: I1211 10:04:40.917381 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ntbj9"] Dec 11 10:04:41 crc kubenswrapper[4881]: I1211 10:04:41.103636 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-clg8t_8df2f7b3-931a-4e09-b473-f71d8ee210d8/perses-operator/0.log" Dec 11 10:04:41 crc kubenswrapper[4881]: I1211 10:04:41.696619 4881 generic.go:334] "Generic (PLEG): container finished" podID="e8067619-5ce9-4b22-a287-e65e3d6b1e0e" containerID="85dfac275c9fa7a1aa89120ebfd65fd8074c213ca1b5011d7d66e62157a3fc57" exitCode=0 Dec 11 10:04:41 crc kubenswrapper[4881]: I1211 10:04:41.696717 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ntbj9" event={"ID":"e8067619-5ce9-4b22-a287-e65e3d6b1e0e","Type":"ContainerDied","Data":"85dfac275c9fa7a1aa89120ebfd65fd8074c213ca1b5011d7d66e62157a3fc57"} Dec 11 10:04:41 crc kubenswrapper[4881]: I1211 10:04:41.697406 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ntbj9" event={"ID":"e8067619-5ce9-4b22-a287-e65e3d6b1e0e","Type":"ContainerStarted","Data":"a3c88600c5998cacad4ae05c1d325245442faa1896b41b30432c6b5e2c7e988f"} 
Dec 11 10:04:41 crc kubenswrapper[4881]: I1211 10:04:41.699469 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 11 10:04:42 crc kubenswrapper[4881]: I1211 10:04:42.711399 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ntbj9" event={"ID":"e8067619-5ce9-4b22-a287-e65e3d6b1e0e","Type":"ContainerStarted","Data":"4b3152b77990bdb945fe7bc3d92c75a41d4a6e476230b1870dbdcbead551eb44"}
Dec 11 10:04:47 crc kubenswrapper[4881]: I1211 10:04:47.763718 4881 generic.go:334] "Generic (PLEG): container finished" podID="e8067619-5ce9-4b22-a287-e65e3d6b1e0e" containerID="4b3152b77990bdb945fe7bc3d92c75a41d4a6e476230b1870dbdcbead551eb44" exitCode=0
Dec 11 10:04:47 crc kubenswrapper[4881]: I1211 10:04:47.763942 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ntbj9" event={"ID":"e8067619-5ce9-4b22-a287-e65e3d6b1e0e","Type":"ContainerDied","Data":"4b3152b77990bdb945fe7bc3d92c75a41d4a6e476230b1870dbdcbead551eb44"}
Dec 11 10:04:49 crc kubenswrapper[4881]: I1211 10:04:49.797622 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ntbj9" event={"ID":"e8067619-5ce9-4b22-a287-e65e3d6b1e0e","Type":"ContainerStarted","Data":"93d3560aa75b7bcb1b2cb34c161b2cef60475d0dcb73f881a26537062e9889c5"}
Dec 11 10:04:49 crc kubenswrapper[4881]: I1211 10:04:49.819512 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ntbj9" podStartSLOduration=3.803284626 podStartE2EDuration="10.819493267s" podCreationTimestamp="2025-12-11 10:04:39 +0000 UTC" firstStartedPulling="2025-12-11 10:04:41.698675317 +0000 UTC m=+6530.076044024" lastFinishedPulling="2025-12-11 10:04:48.714883968 +0000 UTC m=+6537.092252665" observedRunningTime="2025-12-11 10:04:49.814186177 +0000 UTC m=+6538.191554874" watchObservedRunningTime="2025-12-11 10:04:49.819493267 +0000 UTC m=+6538.196861964"
Dec 11 10:04:50 crc kubenswrapper[4881]: I1211 10:04:50.166970 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ntbj9"
Dec 11 10:04:50 crc kubenswrapper[4881]: I1211 10:04:50.167029 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ntbj9"
Dec 11 10:04:51 crc kubenswrapper[4881]: I1211 10:04:51.233163 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ntbj9" podUID="e8067619-5ce9-4b22-a287-e65e3d6b1e0e" containerName="registry-server" probeResult="failure" output=<
Dec 11 10:04:51 crc kubenswrapper[4881]: 	timeout: failed to connect service ":50051" within 1s
Dec 11 10:04:51 crc kubenswrapper[4881]: 	>
Dec 11 10:04:57 crc kubenswrapper[4881]: I1211 10:04:57.461049 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5895ddbf9f-qxpgx_71e1a3d0-ed67-45c6-8bfd-95237910c5c9/kube-rbac-proxy/0.log"
Dec 11 10:04:57 crc kubenswrapper[4881]: I1211 10:04:57.520404 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5895ddbf9f-qxpgx_71e1a3d0-ed67-45c6-8bfd-95237910c5c9/manager/0.log"
Dec 11 10:04:59 crc kubenswrapper[4881]: I1211 10:04:59.397169 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 10:04:59 crc kubenswrapper[4881]: I1211 10:04:59.397501 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
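The Startup probe failures recorded above (and once more below) mean the registry-server container was not yet accepting connections on port 50051 within the probe's 1 s limit. A minimal stand-in for that reachability check, assuming a plain TCP dial; the pod address here is hypothetical, since the log records only the port ":50051":

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Hypothetical pod IP; only the port ":50051" appears in the log.
	addr := "10.217.0.99:50051"
	conn, err := net.DialTimeout("tcp", addr, 1*time.Second)
	if err != nil {
		// Mirrors the probe output captured in the entries above.
		fmt.Printf("timeout: failed to connect service %q within 1s\n", ":50051")
		return
	}
	conn.Close()
	fmt.Println("probe ok")
}

The startup-latency entry above is internally consistent with podStartSLOduration being the end-to-end start time minus the image-pull window: using the monotonic (m=) clock values, 10.819493267 s − (6537.092252665 s − 6530.076044024 s) = 3.803284626 s, i.e. roughly 7.016 s of the 10.8 s startup went to pulling the catalog image.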
status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:04:59 crc kubenswrapper[4881]: I1211 10:04:59.397501 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:05:01 crc kubenswrapper[4881]: I1211 10:05:01.220887 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ntbj9" podUID="e8067619-5ce9-4b22-a287-e65e3d6b1e0e" containerName="registry-server" probeResult="failure" output=< Dec 11 10:05:01 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 10:05:01 crc kubenswrapper[4881]: > Dec 11 10:05:10 crc kubenswrapper[4881]: I1211 10:05:10.249523 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ntbj9" Dec 11 10:05:10 crc kubenswrapper[4881]: I1211 10:05:10.331052 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ntbj9" Dec 11 10:05:11 crc kubenswrapper[4881]: I1211 10:05:11.080148 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ntbj9"] Dec 11 10:05:12 crc kubenswrapper[4881]: I1211 10:05:12.165467 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ntbj9" podUID="e8067619-5ce9-4b22-a287-e65e3d6b1e0e" containerName="registry-server" containerID="cri-o://93d3560aa75b7bcb1b2cb34c161b2cef60475d0dcb73f881a26537062e9889c5" gracePeriod=2 Dec 11 10:05:12 crc kubenswrapper[4881]: I1211 10:05:12.880484 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ntbj9" Dec 11 10:05:12 crc kubenswrapper[4881]: I1211 10:05:12.939930 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-catalog-content\") pod \"e8067619-5ce9-4b22-a287-e65e3d6b1e0e\" (UID: \"e8067619-5ce9-4b22-a287-e65e3d6b1e0e\") " Dec 11 10:05:12 crc kubenswrapper[4881]: I1211 10:05:12.940098 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9z6t\" (UniqueName: \"kubernetes.io/projected/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-kube-api-access-w9z6t\") pod \"e8067619-5ce9-4b22-a287-e65e3d6b1e0e\" (UID: \"e8067619-5ce9-4b22-a287-e65e3d6b1e0e\") " Dec 11 10:05:12 crc kubenswrapper[4881]: I1211 10:05:12.940119 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-utilities\") pod \"e8067619-5ce9-4b22-a287-e65e3d6b1e0e\" (UID: \"e8067619-5ce9-4b22-a287-e65e3d6b1e0e\") " Dec 11 10:05:12 crc kubenswrapper[4881]: I1211 10:05:12.943843 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-utilities" (OuterVolumeSpecName: "utilities") pod "e8067619-5ce9-4b22-a287-e65e3d6b1e0e" (UID: "e8067619-5ce9-4b22-a287-e65e3d6b1e0e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:05:12 crc kubenswrapper[4881]: I1211 10:05:12.969558 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-kube-api-access-w9z6t" (OuterVolumeSpecName: "kube-api-access-w9z6t") pod "e8067619-5ce9-4b22-a287-e65e3d6b1e0e" (UID: "e8067619-5ce9-4b22-a287-e65e3d6b1e0e"). InnerVolumeSpecName "kube-api-access-w9z6t". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.045567 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9z6t\" (UniqueName: \"kubernetes.io/projected/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-kube-api-access-w9z6t\") on node \"crc\" DevicePath \"\"" Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.047413 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.059470 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e8067619-5ce9-4b22-a287-e65e3d6b1e0e" (UID: "e8067619-5ce9-4b22-a287-e65e3d6b1e0e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.150670 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8067619-5ce9-4b22-a287-e65e3d6b1e0e-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.181413 4881 generic.go:334] "Generic (PLEG): container finished" podID="e8067619-5ce9-4b22-a287-e65e3d6b1e0e" containerID="93d3560aa75b7bcb1b2cb34c161b2cef60475d0dcb73f881a26537062e9889c5" exitCode=0 Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.181511 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ntbj9" event={"ID":"e8067619-5ce9-4b22-a287-e65e3d6b1e0e","Type":"ContainerDied","Data":"93d3560aa75b7bcb1b2cb34c161b2cef60475d0dcb73f881a26537062e9889c5"} Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.181619 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ntbj9" event={"ID":"e8067619-5ce9-4b22-a287-e65e3d6b1e0e","Type":"ContainerDied","Data":"a3c88600c5998cacad4ae05c1d325245442faa1896b41b30432c6b5e2c7e988f"} Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.181651 4881 scope.go:117] "RemoveContainer" containerID="93d3560aa75b7bcb1b2cb34c161b2cef60475d0dcb73f881a26537062e9889c5" Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.182710 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ntbj9" Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.239930 4881 scope.go:117] "RemoveContainer" containerID="4b3152b77990bdb945fe7bc3d92c75a41d4a6e476230b1870dbdcbead551eb44" Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.243959 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ntbj9"] Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.257451 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ntbj9"] Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.272679 4881 scope.go:117] "RemoveContainer" containerID="85dfac275c9fa7a1aa89120ebfd65fd8074c213ca1b5011d7d66e62157a3fc57" Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.346182 4881 scope.go:117] "RemoveContainer" containerID="93d3560aa75b7bcb1b2cb34c161b2cef60475d0dcb73f881a26537062e9889c5" Dec 11 10:05:13 crc kubenswrapper[4881]: E1211 10:05:13.347082 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93d3560aa75b7bcb1b2cb34c161b2cef60475d0dcb73f881a26537062e9889c5\": container with ID starting with 93d3560aa75b7bcb1b2cb34c161b2cef60475d0dcb73f881a26537062e9889c5 not found: ID does not exist" containerID="93d3560aa75b7bcb1b2cb34c161b2cef60475d0dcb73f881a26537062e9889c5" Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.347554 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93d3560aa75b7bcb1b2cb34c161b2cef60475d0dcb73f881a26537062e9889c5"} err="failed to get container status \"93d3560aa75b7bcb1b2cb34c161b2cef60475d0dcb73f881a26537062e9889c5\": rpc error: code = NotFound desc = could not find container \"93d3560aa75b7bcb1b2cb34c161b2cef60475d0dcb73f881a26537062e9889c5\": container with ID starting with 93d3560aa75b7bcb1b2cb34c161b2cef60475d0dcb73f881a26537062e9889c5 not found: ID does not exist" Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.347593 4881 scope.go:117] "RemoveContainer" containerID="4b3152b77990bdb945fe7bc3d92c75a41d4a6e476230b1870dbdcbead551eb44" Dec 11 10:05:13 crc kubenswrapper[4881]: E1211 10:05:13.347869 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b3152b77990bdb945fe7bc3d92c75a41d4a6e476230b1870dbdcbead551eb44\": container with ID starting with 4b3152b77990bdb945fe7bc3d92c75a41d4a6e476230b1870dbdcbead551eb44 not found: ID does not exist" containerID="4b3152b77990bdb945fe7bc3d92c75a41d4a6e476230b1870dbdcbead551eb44" Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.347902 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b3152b77990bdb945fe7bc3d92c75a41d4a6e476230b1870dbdcbead551eb44"} err="failed to get container status \"4b3152b77990bdb945fe7bc3d92c75a41d4a6e476230b1870dbdcbead551eb44\": rpc error: code = NotFound desc = could not find container \"4b3152b77990bdb945fe7bc3d92c75a41d4a6e476230b1870dbdcbead551eb44\": container with ID starting with 4b3152b77990bdb945fe7bc3d92c75a41d4a6e476230b1870dbdcbead551eb44 not found: ID does not exist" Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.347923 4881 scope.go:117] "RemoveContainer" containerID="85dfac275c9fa7a1aa89120ebfd65fd8074c213ca1b5011d7d66e62157a3fc57" Dec 11 10:05:13 crc kubenswrapper[4881]: E1211 10:05:13.348167 4881 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"85dfac275c9fa7a1aa89120ebfd65fd8074c213ca1b5011d7d66e62157a3fc57\": container with ID starting with 85dfac275c9fa7a1aa89120ebfd65fd8074c213ca1b5011d7d66e62157a3fc57 not found: ID does not exist" containerID="85dfac275c9fa7a1aa89120ebfd65fd8074c213ca1b5011d7d66e62157a3fc57" Dec 11 10:05:13 crc kubenswrapper[4881]: I1211 10:05:13.348194 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85dfac275c9fa7a1aa89120ebfd65fd8074c213ca1b5011d7d66e62157a3fc57"} err="failed to get container status \"85dfac275c9fa7a1aa89120ebfd65fd8074c213ca1b5011d7d66e62157a3fc57\": rpc error: code = NotFound desc = could not find container \"85dfac275c9fa7a1aa89120ebfd65fd8074c213ca1b5011d7d66e62157a3fc57\": container with ID starting with 85dfac275c9fa7a1aa89120ebfd65fd8074c213ca1b5011d7d66e62157a3fc57 not found: ID does not exist" Dec 11 10:05:15 crc kubenswrapper[4881]: I1211 10:05:15.019213 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8067619-5ce9-4b22-a287-e65e3d6b1e0e" path="/var/lib/kubelet/pods/e8067619-5ce9-4b22-a287-e65e3d6b1e0e/volumes" Dec 11 10:05:29 crc kubenswrapper[4881]: I1211 10:05:29.398530 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:05:29 crc kubenswrapper[4881]: I1211 10:05:29.399155 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:05:29 crc kubenswrapper[4881]: I1211 10:05:29.399235 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" Dec 11 10:05:29 crc kubenswrapper[4881]: I1211 10:05:29.400262 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 10:05:29 crc kubenswrapper[4881]: I1211 10:05:29.400356 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334" gracePeriod=600 Dec 11 10:05:29 crc kubenswrapper[4881]: E1211 10:05:29.520387 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:05:30 crc kubenswrapper[4881]: I1211 10:05:30.421266 4881 generic.go:334] "Generic 
(PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334" exitCode=0 Dec 11 10:05:30 crc kubenswrapper[4881]: I1211 10:05:30.421360 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"} Dec 11 10:05:30 crc kubenswrapper[4881]: I1211 10:05:30.421641 4881 scope.go:117] "RemoveContainer" containerID="82dddf54ebc8157bc717b15196b143b5bf6d65e5f3b7b89542d401dd508acd69" Dec 11 10:05:30 crc kubenswrapper[4881]: I1211 10:05:30.427880 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334" Dec 11 10:05:30 crc kubenswrapper[4881]: E1211 10:05:30.428545 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:05:45 crc kubenswrapper[4881]: I1211 10:05:45.006304 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334" Dec 11 10:05:45 crc kubenswrapper[4881]: E1211 10:05:45.007230 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:05:56 crc kubenswrapper[4881]: I1211 10:05:56.013392 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334" Dec 11 10:05:56 crc kubenswrapper[4881]: E1211 10:05:56.015194 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:06:08 crc kubenswrapper[4881]: I1211 10:06:08.006274 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334" Dec 11 10:06:08 crc kubenswrapper[4881]: E1211 10:06:08.006920 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:06:14 crc kubenswrapper[4881]: I1211 10:06:14.054346 4881 scope.go:117] "RemoveContainer" containerID="e245d9b6b5ced786e2aa27b689797e9c33be0f7dcadea5439aad8bd0215f3d62" Dec 11 
10:06:21 crc kubenswrapper[4881]: I1211 10:06:21.005448 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334" Dec 11 10:06:21 crc kubenswrapper[4881]: E1211 10:06:21.006273 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:06:27 crc kubenswrapper[4881]: I1211 10:06:27.847503 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5jrrh"] Dec 11 10:06:27 crc kubenswrapper[4881]: E1211 10:06:27.848633 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8067619-5ce9-4b22-a287-e65e3d6b1e0e" containerName="registry-server" Dec 11 10:06:27 crc kubenswrapper[4881]: I1211 10:06:27.848664 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8067619-5ce9-4b22-a287-e65e3d6b1e0e" containerName="registry-server" Dec 11 10:06:27 crc kubenswrapper[4881]: E1211 10:06:27.848716 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8067619-5ce9-4b22-a287-e65e3d6b1e0e" containerName="extract-content" Dec 11 10:06:27 crc kubenswrapper[4881]: I1211 10:06:27.848735 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8067619-5ce9-4b22-a287-e65e3d6b1e0e" containerName="extract-content" Dec 11 10:06:27 crc kubenswrapper[4881]: E1211 10:06:27.848762 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8067619-5ce9-4b22-a287-e65e3d6b1e0e" containerName="extract-utilities" Dec 11 10:06:27 crc kubenswrapper[4881]: I1211 10:06:27.848770 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8067619-5ce9-4b22-a287-e65e3d6b1e0e" containerName="extract-utilities" Dec 11 10:06:27 crc kubenswrapper[4881]: I1211 10:06:27.849024 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8067619-5ce9-4b22-a287-e65e3d6b1e0e" containerName="registry-server" Dec 11 10:06:27 crc kubenswrapper[4881]: I1211 10:06:27.850899 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5jrrh" Dec 11 10:06:27 crc kubenswrapper[4881]: I1211 10:06:27.861897 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5jrrh"] Dec 11 10:06:27 crc kubenswrapper[4881]: I1211 10:06:27.981169 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d504e4dc-abb2-435a-ad46-09acdda9d84b-utilities\") pod \"certified-operators-5jrrh\" (UID: \"d504e4dc-abb2-435a-ad46-09acdda9d84b\") " pod="openshift-marketplace/certified-operators-5jrrh" Dec 11 10:06:27 crc kubenswrapper[4881]: I1211 10:06:27.981435 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t26ng\" (UniqueName: \"kubernetes.io/projected/d504e4dc-abb2-435a-ad46-09acdda9d84b-kube-api-access-t26ng\") pod \"certified-operators-5jrrh\" (UID: \"d504e4dc-abb2-435a-ad46-09acdda9d84b\") " pod="openshift-marketplace/certified-operators-5jrrh" Dec 11 10:06:27 crc kubenswrapper[4881]: I1211 10:06:27.981461 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d504e4dc-abb2-435a-ad46-09acdda9d84b-catalog-content\") pod \"certified-operators-5jrrh\" (UID: \"d504e4dc-abb2-435a-ad46-09acdda9d84b\") " pod="openshift-marketplace/certified-operators-5jrrh" Dec 11 10:06:28 crc kubenswrapper[4881]: I1211 10:06:28.085495 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d504e4dc-abb2-435a-ad46-09acdda9d84b-utilities\") pod \"certified-operators-5jrrh\" (UID: \"d504e4dc-abb2-435a-ad46-09acdda9d84b\") " pod="openshift-marketplace/certified-operators-5jrrh" Dec 11 10:06:28 crc kubenswrapper[4881]: I1211 10:06:28.085936 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d504e4dc-abb2-435a-ad46-09acdda9d84b-utilities\") pod \"certified-operators-5jrrh\" (UID: \"d504e4dc-abb2-435a-ad46-09acdda9d84b\") " pod="openshift-marketplace/certified-operators-5jrrh" Dec 11 10:06:28 crc kubenswrapper[4881]: I1211 10:06:28.090232 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t26ng\" (UniqueName: \"kubernetes.io/projected/d504e4dc-abb2-435a-ad46-09acdda9d84b-kube-api-access-t26ng\") pod \"certified-operators-5jrrh\" (UID: \"d504e4dc-abb2-435a-ad46-09acdda9d84b\") " pod="openshift-marketplace/certified-operators-5jrrh" Dec 11 10:06:28 crc kubenswrapper[4881]: I1211 10:06:28.090462 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d504e4dc-abb2-435a-ad46-09acdda9d84b-catalog-content\") pod \"certified-operators-5jrrh\" (UID: \"d504e4dc-abb2-435a-ad46-09acdda9d84b\") " pod="openshift-marketplace/certified-operators-5jrrh" Dec 11 10:06:28 crc kubenswrapper[4881]: I1211 10:06:28.090888 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d504e4dc-abb2-435a-ad46-09acdda9d84b-catalog-content\") pod \"certified-operators-5jrrh\" (UID: \"d504e4dc-abb2-435a-ad46-09acdda9d84b\") " pod="openshift-marketplace/certified-operators-5jrrh" Dec 11 10:06:28 crc kubenswrapper[4881]: I1211 10:06:28.111993 4881 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-t26ng\" (UniqueName: \"kubernetes.io/projected/d504e4dc-abb2-435a-ad46-09acdda9d84b-kube-api-access-t26ng\") pod \"certified-operators-5jrrh\" (UID: \"d504e4dc-abb2-435a-ad46-09acdda9d84b\") " pod="openshift-marketplace/certified-operators-5jrrh" Dec 11 10:06:28 crc kubenswrapper[4881]: I1211 10:06:28.172546 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5jrrh" Dec 11 10:06:28 crc kubenswrapper[4881]: I1211 10:06:28.714581 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5jrrh"] Dec 11 10:06:29 crc kubenswrapper[4881]: I1211 10:06:29.070488 4881 generic.go:334] "Generic (PLEG): container finished" podID="d504e4dc-abb2-435a-ad46-09acdda9d84b" containerID="e70d64b0f49c6945862bb893bc215556582cf4134444599048b602ce34e6dc03" exitCode=0 Dec 11 10:06:29 crc kubenswrapper[4881]: I1211 10:06:29.070551 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jrrh" event={"ID":"d504e4dc-abb2-435a-ad46-09acdda9d84b","Type":"ContainerDied","Data":"e70d64b0f49c6945862bb893bc215556582cf4134444599048b602ce34e6dc03"} Dec 11 10:06:29 crc kubenswrapper[4881]: I1211 10:06:29.070574 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jrrh" event={"ID":"d504e4dc-abb2-435a-ad46-09acdda9d84b","Type":"ContainerStarted","Data":"056ecb4974ebad3e652dfa16a77ccc166f7b4f3c691c21caf17faad7641e4888"} Dec 11 10:06:29 crc kubenswrapper[4881]: I1211 10:06:29.840035 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xn9vx"] Dec 11 10:06:29 crc kubenswrapper[4881]: I1211 10:06:29.843533 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xn9vx" Dec 11 10:06:29 crc kubenswrapper[4881]: I1211 10:06:29.858773 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xn9vx"] Dec 11 10:06:29 crc kubenswrapper[4881]: I1211 10:06:29.942174 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/368b103b-ef6b-4607-ae3b-8efd42f4ef3f-utilities\") pod \"community-operators-xn9vx\" (UID: \"368b103b-ef6b-4607-ae3b-8efd42f4ef3f\") " pod="openshift-marketplace/community-operators-xn9vx" Dec 11 10:06:29 crc kubenswrapper[4881]: I1211 10:06:29.942459 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/368b103b-ef6b-4607-ae3b-8efd42f4ef3f-catalog-content\") pod \"community-operators-xn9vx\" (UID: \"368b103b-ef6b-4607-ae3b-8efd42f4ef3f\") " pod="openshift-marketplace/community-operators-xn9vx" Dec 11 10:06:29 crc kubenswrapper[4881]: I1211 10:06:29.942519 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47dxr\" (UniqueName: \"kubernetes.io/projected/368b103b-ef6b-4607-ae3b-8efd42f4ef3f-kube-api-access-47dxr\") pod \"community-operators-xn9vx\" (UID: \"368b103b-ef6b-4607-ae3b-8efd42f4ef3f\") " pod="openshift-marketplace/community-operators-xn9vx" Dec 11 10:06:30 crc kubenswrapper[4881]: I1211 10:06:30.045871 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/368b103b-ef6b-4607-ae3b-8efd42f4ef3f-catalog-content\") pod \"community-operators-xn9vx\" (UID: \"368b103b-ef6b-4607-ae3b-8efd42f4ef3f\") " pod="openshift-marketplace/community-operators-xn9vx" Dec 11 10:06:30 crc kubenswrapper[4881]: I1211 10:06:30.045936 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47dxr\" (UniqueName: \"kubernetes.io/projected/368b103b-ef6b-4607-ae3b-8efd42f4ef3f-kube-api-access-47dxr\") pod \"community-operators-xn9vx\" (UID: \"368b103b-ef6b-4607-ae3b-8efd42f4ef3f\") " pod="openshift-marketplace/community-operators-xn9vx" Dec 11 10:06:30 crc kubenswrapper[4881]: I1211 10:06:30.045999 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/368b103b-ef6b-4607-ae3b-8efd42f4ef3f-utilities\") pod \"community-operators-xn9vx\" (UID: \"368b103b-ef6b-4607-ae3b-8efd42f4ef3f\") " pod="openshift-marketplace/community-operators-xn9vx" Dec 11 10:06:30 crc kubenswrapper[4881]: I1211 10:06:30.046416 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/368b103b-ef6b-4607-ae3b-8efd42f4ef3f-catalog-content\") pod \"community-operators-xn9vx\" (UID: \"368b103b-ef6b-4607-ae3b-8efd42f4ef3f\") " pod="openshift-marketplace/community-operators-xn9vx" Dec 11 10:06:30 crc kubenswrapper[4881]: I1211 10:06:30.046457 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/368b103b-ef6b-4607-ae3b-8efd42f4ef3f-utilities\") pod \"community-operators-xn9vx\" (UID: \"368b103b-ef6b-4607-ae3b-8efd42f4ef3f\") " pod="openshift-marketplace/community-operators-xn9vx" Dec 11 10:06:30 crc kubenswrapper[4881]: I1211 10:06:30.070443 4881 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-47dxr\" (UniqueName: \"kubernetes.io/projected/368b103b-ef6b-4607-ae3b-8efd42f4ef3f-kube-api-access-47dxr\") pod \"community-operators-xn9vx\" (UID: \"368b103b-ef6b-4607-ae3b-8efd42f4ef3f\") " pod="openshift-marketplace/community-operators-xn9vx" Dec 11 10:06:30 crc kubenswrapper[4881]: I1211 10:06:30.179977 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xn9vx" Dec 11 10:06:30 crc kubenswrapper[4881]: I1211 10:06:30.734804 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xn9vx"] Dec 11 10:06:30 crc kubenswrapper[4881]: W1211 10:06:30.735598 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod368b103b_ef6b_4607_ae3b_8efd42f4ef3f.slice/crio-bbce788a63d46c88f2255c0b88d7413ef73358260808556ea88e7b6e3862753b WatchSource:0}: Error finding container bbce788a63d46c88f2255c0b88d7413ef73358260808556ea88e7b6e3862753b: Status 404 returned error can't find the container with id bbce788a63d46c88f2255c0b88d7413ef73358260808556ea88e7b6e3862753b Dec 11 10:06:31 crc kubenswrapper[4881]: I1211 10:06:31.097554 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xn9vx" event={"ID":"368b103b-ef6b-4607-ae3b-8efd42f4ef3f","Type":"ContainerStarted","Data":"662fe83db5097489c64391350406c4c0e645c77548818f25bf3cec2cdf54d10e"} Dec 11 10:06:31 crc kubenswrapper[4881]: I1211 10:06:31.097963 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xn9vx" event={"ID":"368b103b-ef6b-4607-ae3b-8efd42f4ef3f","Type":"ContainerStarted","Data":"bbce788a63d46c88f2255c0b88d7413ef73358260808556ea88e7b6e3862753b"} Dec 11 10:06:31 crc kubenswrapper[4881]: I1211 10:06:31.100842 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jrrh" event={"ID":"d504e4dc-abb2-435a-ad46-09acdda9d84b","Type":"ContainerStarted","Data":"a293aad4334e06ed1bcac8eecd15457857922114f41ef9ae6af2b0028fc293eb"} Dec 11 10:06:31 crc kubenswrapper[4881]: I1211 10:06:31.449384 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tl8kp"] Dec 11 10:06:31 crc kubenswrapper[4881]: I1211 10:06:31.457221 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tl8kp" Dec 11 10:06:31 crc kubenswrapper[4881]: I1211 10:06:31.507965 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tl8kp"] Dec 11 10:06:31 crc kubenswrapper[4881]: I1211 10:06:31.595366 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9e52945-cd55-4085-80b8-e4c7418ebe7b-utilities\") pod \"redhat-marketplace-tl8kp\" (UID: \"b9e52945-cd55-4085-80b8-e4c7418ebe7b\") " pod="openshift-marketplace/redhat-marketplace-tl8kp" Dec 11 10:06:31 crc kubenswrapper[4881]: I1211 10:06:31.595438 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9e52945-cd55-4085-80b8-e4c7418ebe7b-catalog-content\") pod \"redhat-marketplace-tl8kp\" (UID: \"b9e52945-cd55-4085-80b8-e4c7418ebe7b\") " pod="openshift-marketplace/redhat-marketplace-tl8kp" Dec 11 10:06:31 crc kubenswrapper[4881]: I1211 10:06:31.595543 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfs5z\" (UniqueName: \"kubernetes.io/projected/b9e52945-cd55-4085-80b8-e4c7418ebe7b-kube-api-access-wfs5z\") pod \"redhat-marketplace-tl8kp\" (UID: \"b9e52945-cd55-4085-80b8-e4c7418ebe7b\") " pod="openshift-marketplace/redhat-marketplace-tl8kp" Dec 11 10:06:31 crc kubenswrapper[4881]: I1211 10:06:31.698271 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9e52945-cd55-4085-80b8-e4c7418ebe7b-utilities\") pod \"redhat-marketplace-tl8kp\" (UID: \"b9e52945-cd55-4085-80b8-e4c7418ebe7b\") " pod="openshift-marketplace/redhat-marketplace-tl8kp" Dec 11 10:06:31 crc kubenswrapper[4881]: I1211 10:06:31.698356 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9e52945-cd55-4085-80b8-e4c7418ebe7b-catalog-content\") pod \"redhat-marketplace-tl8kp\" (UID: \"b9e52945-cd55-4085-80b8-e4c7418ebe7b\") " pod="openshift-marketplace/redhat-marketplace-tl8kp" Dec 11 10:06:31 crc kubenswrapper[4881]: I1211 10:06:31.698435 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfs5z\" (UniqueName: \"kubernetes.io/projected/b9e52945-cd55-4085-80b8-e4c7418ebe7b-kube-api-access-wfs5z\") pod \"redhat-marketplace-tl8kp\" (UID: \"b9e52945-cd55-4085-80b8-e4c7418ebe7b\") " pod="openshift-marketplace/redhat-marketplace-tl8kp" Dec 11 10:06:31 crc kubenswrapper[4881]: I1211 10:06:31.699376 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9e52945-cd55-4085-80b8-e4c7418ebe7b-utilities\") pod \"redhat-marketplace-tl8kp\" (UID: \"b9e52945-cd55-4085-80b8-e4c7418ebe7b\") " pod="openshift-marketplace/redhat-marketplace-tl8kp" Dec 11 10:06:31 crc kubenswrapper[4881]: I1211 10:06:31.699656 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9e52945-cd55-4085-80b8-e4c7418ebe7b-catalog-content\") pod \"redhat-marketplace-tl8kp\" (UID: \"b9e52945-cd55-4085-80b8-e4c7418ebe7b\") " pod="openshift-marketplace/redhat-marketplace-tl8kp" Dec 11 10:06:31 crc kubenswrapper[4881]: I1211 10:06:31.720135 4881 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-wfs5z\" (UniqueName: \"kubernetes.io/projected/b9e52945-cd55-4085-80b8-e4c7418ebe7b-kube-api-access-wfs5z\") pod \"redhat-marketplace-tl8kp\" (UID: \"b9e52945-cd55-4085-80b8-e4c7418ebe7b\") " pod="openshift-marketplace/redhat-marketplace-tl8kp" Dec 11 10:06:31 crc kubenswrapper[4881]: I1211 10:06:31.830814 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tl8kp" Dec 11 10:06:32 crc kubenswrapper[4881]: I1211 10:06:32.121058 4881 generic.go:334] "Generic (PLEG): container finished" podID="d504e4dc-abb2-435a-ad46-09acdda9d84b" containerID="a293aad4334e06ed1bcac8eecd15457857922114f41ef9ae6af2b0028fc293eb" exitCode=0 Dec 11 10:06:32 crc kubenswrapper[4881]: I1211 10:06:32.124492 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jrrh" event={"ID":"d504e4dc-abb2-435a-ad46-09acdda9d84b","Type":"ContainerDied","Data":"a293aad4334e06ed1bcac8eecd15457857922114f41ef9ae6af2b0028fc293eb"} Dec 11 10:06:32 crc kubenswrapper[4881]: I1211 10:06:32.129579 4881 generic.go:334] "Generic (PLEG): container finished" podID="368b103b-ef6b-4607-ae3b-8efd42f4ef3f" containerID="662fe83db5097489c64391350406c4c0e645c77548818f25bf3cec2cdf54d10e" exitCode=0 Dec 11 10:06:32 crc kubenswrapper[4881]: I1211 10:06:32.129619 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xn9vx" event={"ID":"368b103b-ef6b-4607-ae3b-8efd42f4ef3f","Type":"ContainerDied","Data":"662fe83db5097489c64391350406c4c0e645c77548818f25bf3cec2cdf54d10e"} Dec 11 10:06:33 crc kubenswrapper[4881]: I1211 10:06:33.014710 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334" Dec 11 10:06:33 crc kubenswrapper[4881]: E1211 10:06:33.016939 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:06:33 crc kubenswrapper[4881]: I1211 10:06:33.292305 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tl8kp"] Dec 11 10:06:33 crc kubenswrapper[4881]: W1211 10:06:33.311389 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb9e52945_cd55_4085_80b8_e4c7418ebe7b.slice/crio-3ee42c9c154c41cc8483809eef10a7505e4a14e2829ba354977c993b1d0dd4b0 WatchSource:0}: Error finding container 3ee42c9c154c41cc8483809eef10a7505e4a14e2829ba354977c993b1d0dd4b0: Status 404 returned error can't find the container with id 3ee42c9c154c41cc8483809eef10a7505e4a14e2829ba354977c993b1d0dd4b0 Dec 11 10:06:34 crc kubenswrapper[4881]: I1211 10:06:34.274726 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jrrh" event={"ID":"d504e4dc-abb2-435a-ad46-09acdda9d84b","Type":"ContainerStarted","Data":"9e91b52ca0d89b2c3b4938dcd2bda6fb577c10b915300f942dd44b3c4a2c2905"} Dec 11 10:06:34 crc kubenswrapper[4881]: I1211 10:06:34.293192 4881 generic.go:334] "Generic (PLEG): container finished" podID="b9e52945-cd55-4085-80b8-e4c7418ebe7b" 
containerID="797849f855287f55e4047a4d11352a6b5a4fe856e9b67fe25fa1bfcb35196782" exitCode=0 Dec 11 10:06:34 crc kubenswrapper[4881]: I1211 10:06:34.293389 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tl8kp" event={"ID":"b9e52945-cd55-4085-80b8-e4c7418ebe7b","Type":"ContainerDied","Data":"797849f855287f55e4047a4d11352a6b5a4fe856e9b67fe25fa1bfcb35196782"} Dec 11 10:06:34 crc kubenswrapper[4881]: I1211 10:06:34.293541 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tl8kp" event={"ID":"b9e52945-cd55-4085-80b8-e4c7418ebe7b","Type":"ContainerStarted","Data":"3ee42c9c154c41cc8483809eef10a7505e4a14e2829ba354977c993b1d0dd4b0"} Dec 11 10:06:34 crc kubenswrapper[4881]: I1211 10:06:34.316247 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5jrrh" podStartSLOduration=3.078239512 podStartE2EDuration="7.316224516s" podCreationTimestamp="2025-12-11 10:06:27 +0000 UTC" firstStartedPulling="2025-12-11 10:06:29.071946863 +0000 UTC m=+6637.449315560" lastFinishedPulling="2025-12-11 10:06:33.309931867 +0000 UTC m=+6641.687300564" observedRunningTime="2025-12-11 10:06:34.299254577 +0000 UTC m=+6642.676623284" watchObservedRunningTime="2025-12-11 10:06:34.316224516 +0000 UTC m=+6642.693593223" Dec 11 10:06:36 crc kubenswrapper[4881]: I1211 10:06:36.328909 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tl8kp" event={"ID":"b9e52945-cd55-4085-80b8-e4c7418ebe7b","Type":"ContainerStarted","Data":"7d3e89a2075b64896d813284eafd1841d6dc460fa6902c6183ee2467f616bf5e"} Dec 11 10:06:37 crc kubenswrapper[4881]: I1211 10:06:37.544225 4881 generic.go:334] "Generic (PLEG): container finished" podID="b9e52945-cd55-4085-80b8-e4c7418ebe7b" containerID="7d3e89a2075b64896d813284eafd1841d6dc460fa6902c6183ee2467f616bf5e" exitCode=0 Dec 11 10:06:37 crc kubenswrapper[4881]: I1211 10:06:37.544465 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tl8kp" event={"ID":"b9e52945-cd55-4085-80b8-e4c7418ebe7b","Type":"ContainerDied","Data":"7d3e89a2075b64896d813284eafd1841d6dc460fa6902c6183ee2467f616bf5e"} Dec 11 10:06:38 crc kubenswrapper[4881]: I1211 10:06:38.175541 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5jrrh" Dec 11 10:06:38 crc kubenswrapper[4881]: I1211 10:06:38.175611 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5jrrh" Dec 11 10:06:39 crc kubenswrapper[4881]: I1211 10:06:39.262561 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-5jrrh" podUID="d504e4dc-abb2-435a-ad46-09acdda9d84b" containerName="registry-server" probeResult="failure" output=< Dec 11 10:06:39 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 10:06:39 crc kubenswrapper[4881]: > Dec 11 10:06:41 crc kubenswrapper[4881]: I1211 10:06:41.592145 4881 generic.go:334] "Generic (PLEG): container finished" podID="368b103b-ef6b-4607-ae3b-8efd42f4ef3f" containerID="b46066213c6cc74a9fd17da9e3610a26ddbc4c9a970b22f826369177da2f0118" exitCode=0 Dec 11 10:06:41 crc kubenswrapper[4881]: I1211 10:06:41.592344 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xn9vx" 
event={"ID":"368b103b-ef6b-4607-ae3b-8efd42f4ef3f","Type":"ContainerDied","Data":"b46066213c6cc74a9fd17da9e3610a26ddbc4c9a970b22f826369177da2f0118"} Dec 11 10:06:41 crc kubenswrapper[4881]: I1211 10:06:41.605325 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tl8kp" event={"ID":"b9e52945-cd55-4085-80b8-e4c7418ebe7b","Type":"ContainerStarted","Data":"0ddb2b4c09d18b41e0d985e70a4340bf60aa329936e2ddb9d4803e158b2dc556"} Dec 11 10:06:41 crc kubenswrapper[4881]: I1211 10:06:41.656547 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tl8kp" podStartSLOduration=4.402028482 podStartE2EDuration="10.656521819s" podCreationTimestamp="2025-12-11 10:06:31 +0000 UTC" firstStartedPulling="2025-12-11 10:06:34.297147336 +0000 UTC m=+6642.674516033" lastFinishedPulling="2025-12-11 10:06:40.551640673 +0000 UTC m=+6648.929009370" observedRunningTime="2025-12-11 10:06:41.645570199 +0000 UTC m=+6650.022939006" watchObservedRunningTime="2025-12-11 10:06:41.656521819 +0000 UTC m=+6650.033890526" Dec 11 10:06:41 crc kubenswrapper[4881]: I1211 10:06:41.832043 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tl8kp" Dec 11 10:06:41 crc kubenswrapper[4881]: I1211 10:06:41.832119 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tl8kp" Dec 11 10:06:42 crc kubenswrapper[4881]: I1211 10:06:42.621068 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xn9vx" event={"ID":"368b103b-ef6b-4607-ae3b-8efd42f4ef3f","Type":"ContainerStarted","Data":"eaf41aba13c7c650644421ab0c57937c21daefd2ba1b338773b78463234c8572"} Dec 11 10:06:42 crc kubenswrapper[4881]: I1211 10:06:42.653350 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xn9vx" podStartSLOduration=3.748188745 podStartE2EDuration="13.653310704s" podCreationTimestamp="2025-12-11 10:06:29 +0000 UTC" firstStartedPulling="2025-12-11 10:06:32.132662628 +0000 UTC m=+6640.510031325" lastFinishedPulling="2025-12-11 10:06:42.037784587 +0000 UTC m=+6650.415153284" observedRunningTime="2025-12-11 10:06:42.64096168 +0000 UTC m=+6651.018330397" watchObservedRunningTime="2025-12-11 10:06:42.653310704 +0000 UTC m=+6651.030679401" Dec 11 10:06:42 crc kubenswrapper[4881]: I1211 10:06:42.893419 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-tl8kp" podUID="b9e52945-cd55-4085-80b8-e4c7418ebe7b" containerName="registry-server" probeResult="failure" output=< Dec 11 10:06:42 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 10:06:42 crc kubenswrapper[4881]: > Dec 11 10:06:47 crc kubenswrapper[4881]: I1211 10:06:47.005976 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334" Dec 11 10:06:47 crc kubenswrapper[4881]: E1211 10:06:47.006773 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:06:48 crc 
kubenswrapper[4881]: I1211 10:06:48.236070 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5jrrh" Dec 11 10:06:48 crc kubenswrapper[4881]: I1211 10:06:48.302027 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5jrrh" Dec 11 10:06:50 crc kubenswrapper[4881]: I1211 10:06:50.181734 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xn9vx" Dec 11 10:06:50 crc kubenswrapper[4881]: I1211 10:06:50.182640 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xn9vx" Dec 11 10:06:50 crc kubenswrapper[4881]: I1211 10:06:50.232317 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xn9vx" Dec 11 10:06:50 crc kubenswrapper[4881]: I1211 10:06:50.811640 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xn9vx" Dec 11 10:06:51 crc kubenswrapper[4881]: I1211 10:06:51.886624 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tl8kp" Dec 11 10:06:51 crc kubenswrapper[4881]: I1211 10:06:51.943783 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tl8kp" Dec 11 10:06:52 crc kubenswrapper[4881]: I1211 10:06:52.829674 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5jrrh"] Dec 11 10:06:52 crc kubenswrapper[4881]: I1211 10:06:52.829944 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5jrrh" podUID="d504e4dc-abb2-435a-ad46-09acdda9d84b" containerName="registry-server" containerID="cri-o://9e91b52ca0d89b2c3b4938dcd2bda6fb577c10b915300f942dd44b3c4a2c2905" gracePeriod=2 Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.421779 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5jrrh" Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.479588 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d504e4dc-abb2-435a-ad46-09acdda9d84b-catalog-content\") pod \"d504e4dc-abb2-435a-ad46-09acdda9d84b\" (UID: \"d504e4dc-abb2-435a-ad46-09acdda9d84b\") " Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.479706 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t26ng\" (UniqueName: \"kubernetes.io/projected/d504e4dc-abb2-435a-ad46-09acdda9d84b-kube-api-access-t26ng\") pod \"d504e4dc-abb2-435a-ad46-09acdda9d84b\" (UID: \"d504e4dc-abb2-435a-ad46-09acdda9d84b\") " Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.479880 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d504e4dc-abb2-435a-ad46-09acdda9d84b-utilities\") pod \"d504e4dc-abb2-435a-ad46-09acdda9d84b\" (UID: \"d504e4dc-abb2-435a-ad46-09acdda9d84b\") " Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.481498 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d504e4dc-abb2-435a-ad46-09acdda9d84b-utilities" (OuterVolumeSpecName: "utilities") pod "d504e4dc-abb2-435a-ad46-09acdda9d84b" (UID: "d504e4dc-abb2-435a-ad46-09acdda9d84b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.488107 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d504e4dc-abb2-435a-ad46-09acdda9d84b-kube-api-access-t26ng" (OuterVolumeSpecName: "kube-api-access-t26ng") pod "d504e4dc-abb2-435a-ad46-09acdda9d84b" (UID: "d504e4dc-abb2-435a-ad46-09acdda9d84b"). InnerVolumeSpecName "kube-api-access-t26ng". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.542543 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d504e4dc-abb2-435a-ad46-09acdda9d84b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d504e4dc-abb2-435a-ad46-09acdda9d84b" (UID: "d504e4dc-abb2-435a-ad46-09acdda9d84b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.583439 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t26ng\" (UniqueName: \"kubernetes.io/projected/d504e4dc-abb2-435a-ad46-09acdda9d84b-kube-api-access-t26ng\") on node \"crc\" DevicePath \"\"" Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.583498 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d504e4dc-abb2-435a-ad46-09acdda9d84b-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.583511 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d504e4dc-abb2-435a-ad46-09acdda9d84b-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.790402 4881 generic.go:334] "Generic (PLEG): container finished" podID="d504e4dc-abb2-435a-ad46-09acdda9d84b" containerID="9e91b52ca0d89b2c3b4938dcd2bda6fb577c10b915300f942dd44b3c4a2c2905" exitCode=0 Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.790523 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jrrh" event={"ID":"d504e4dc-abb2-435a-ad46-09acdda9d84b","Type":"ContainerDied","Data":"9e91b52ca0d89b2c3b4938dcd2bda6fb577c10b915300f942dd44b3c4a2c2905"} Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.790808 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jrrh" event={"ID":"d504e4dc-abb2-435a-ad46-09acdda9d84b","Type":"ContainerDied","Data":"056ecb4974ebad3e652dfa16a77ccc166f7b4f3c691c21caf17faad7641e4888"} Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.790661 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5jrrh"
Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.790860 4881 scope.go:117] "RemoveContainer" containerID="9e91b52ca0d89b2c3b4938dcd2bda6fb577c10b915300f942dd44b3c4a2c2905"
Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.832233 4881 scope.go:117] "RemoveContainer" containerID="a293aad4334e06ed1bcac8eecd15457857922114f41ef9ae6af2b0028fc293eb"
Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.839294 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5jrrh"]
Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.851471 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5jrrh"]
Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.856571 4881 scope.go:117] "RemoveContainer" containerID="e70d64b0f49c6945862bb893bc215556582cf4134444599048b602ce34e6dc03"
Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.921073 4881 scope.go:117] "RemoveContainer" containerID="9e91b52ca0d89b2c3b4938dcd2bda6fb577c10b915300f942dd44b3c4a2c2905"
Dec 11 10:06:53 crc kubenswrapper[4881]: E1211 10:06:53.922873 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e91b52ca0d89b2c3b4938dcd2bda6fb577c10b915300f942dd44b3c4a2c2905\": container with ID starting with 9e91b52ca0d89b2c3b4938dcd2bda6fb577c10b915300f942dd44b3c4a2c2905 not found: ID does not exist" containerID="9e91b52ca0d89b2c3b4938dcd2bda6fb577c10b915300f942dd44b3c4a2c2905"
Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.922910 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e91b52ca0d89b2c3b4938dcd2bda6fb577c10b915300f942dd44b3c4a2c2905"} err="failed to get container status \"9e91b52ca0d89b2c3b4938dcd2bda6fb577c10b915300f942dd44b3c4a2c2905\": rpc error: code = NotFound desc = could not find container \"9e91b52ca0d89b2c3b4938dcd2bda6fb577c10b915300f942dd44b3c4a2c2905\": container with ID starting with 9e91b52ca0d89b2c3b4938dcd2bda6fb577c10b915300f942dd44b3c4a2c2905 not found: ID does not exist"
Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.922942 4881 scope.go:117] "RemoveContainer" containerID="a293aad4334e06ed1bcac8eecd15457857922114f41ef9ae6af2b0028fc293eb"
Dec 11 10:06:53 crc kubenswrapper[4881]: E1211 10:06:53.923434 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a293aad4334e06ed1bcac8eecd15457857922114f41ef9ae6af2b0028fc293eb\": container with ID starting with a293aad4334e06ed1bcac8eecd15457857922114f41ef9ae6af2b0028fc293eb not found: ID does not exist" containerID="a293aad4334e06ed1bcac8eecd15457857922114f41ef9ae6af2b0028fc293eb"
Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.923455 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a293aad4334e06ed1bcac8eecd15457857922114f41ef9ae6af2b0028fc293eb"} err="failed to get container status \"a293aad4334e06ed1bcac8eecd15457857922114f41ef9ae6af2b0028fc293eb\": rpc error: code = NotFound desc = could not find container \"a293aad4334e06ed1bcac8eecd15457857922114f41ef9ae6af2b0028fc293eb\": container with ID starting with a293aad4334e06ed1bcac8eecd15457857922114f41ef9ae6af2b0028fc293eb not found: ID does not exist"
Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.923468 4881 scope.go:117] "RemoveContainer" containerID="e70d64b0f49c6945862bb893bc215556582cf4134444599048b602ce34e6dc03"
Dec 11 10:06:53 crc kubenswrapper[4881]: E1211 10:06:53.923876 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e70d64b0f49c6945862bb893bc215556582cf4134444599048b602ce34e6dc03\": container with ID starting with e70d64b0f49c6945862bb893bc215556582cf4134444599048b602ce34e6dc03 not found: ID does not exist" containerID="e70d64b0f49c6945862bb893bc215556582cf4134444599048b602ce34e6dc03"
Dec 11 10:06:53 crc kubenswrapper[4881]: I1211 10:06:53.923897 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e70d64b0f49c6945862bb893bc215556582cf4134444599048b602ce34e6dc03"} err="failed to get container status \"e70d64b0f49c6945862bb893bc215556582cf4134444599048b602ce34e6dc03\": rpc error: code = NotFound desc = could not find container \"e70d64b0f49c6945862bb893bc215556582cf4134444599048b602ce34e6dc03\": container with ID starting with e70d64b0f49c6945862bb893bc215556582cf4134444599048b602ce34e6dc03 not found: ID does not exist"
Dec 11 10:06:54 crc kubenswrapper[4881]: I1211 10:06:54.258936 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xn9vx"]
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.019763 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d504e4dc-abb2-435a-ad46-09acdda9d84b" path="/var/lib/kubelet/pods/d504e4dc-abb2-435a-ad46-09acdda9d84b/volumes"
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.030110 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-74d77"]
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.030376 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-74d77" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" containerName="registry-server" containerID="cri-o://a89270ffab1342367f2a43c0ce2a03e3b2cfb5489d5a9906587aba0c29c2d4cb" gracePeriod=2
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.615796 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-74d77"
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.636112 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c762h\" (UniqueName: \"kubernetes.io/projected/99c7c976-c996-4b9c-a389-fbaed3f71813-kube-api-access-c762h\") pod \"99c7c976-c996-4b9c-a389-fbaed3f71813\" (UID: \"99c7c976-c996-4b9c-a389-fbaed3f71813\") "
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.636422 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99c7c976-c996-4b9c-a389-fbaed3f71813-utilities\") pod \"99c7c976-c996-4b9c-a389-fbaed3f71813\" (UID: \"99c7c976-c996-4b9c-a389-fbaed3f71813\") "
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.636532 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99c7c976-c996-4b9c-a389-fbaed3f71813-catalog-content\") pod \"99c7c976-c996-4b9c-a389-fbaed3f71813\" (UID: \"99c7c976-c996-4b9c-a389-fbaed3f71813\") "
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.642643 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99c7c976-c996-4b9c-a389-fbaed3f71813-utilities" (OuterVolumeSpecName: "utilities") pod "99c7c976-c996-4b9c-a389-fbaed3f71813" (UID: "99c7c976-c996-4b9c-a389-fbaed3f71813"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.670963 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99c7c976-c996-4b9c-a389-fbaed3f71813-kube-api-access-c762h" (OuterVolumeSpecName: "kube-api-access-c762h") pod "99c7c976-c996-4b9c-a389-fbaed3f71813" (UID: "99c7c976-c996-4b9c-a389-fbaed3f71813"). InnerVolumeSpecName "kube-api-access-c762h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.744206 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c762h\" (UniqueName: \"kubernetes.io/projected/99c7c976-c996-4b9c-a389-fbaed3f71813-kube-api-access-c762h\") on node \"crc\" DevicePath \"\""
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.744245 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99c7c976-c996-4b9c-a389-fbaed3f71813-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.746054 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99c7c976-c996-4b9c-a389-fbaed3f71813-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "99c7c976-c996-4b9c-a389-fbaed3f71813" (UID: "99c7c976-c996-4b9c-a389-fbaed3f71813"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.847139 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99c7c976-c996-4b9c-a389-fbaed3f71813-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.847381 4881 generic.go:334] "Generic (PLEG): container finished" podID="99c7c976-c996-4b9c-a389-fbaed3f71813" containerID="a89270ffab1342367f2a43c0ce2a03e3b2cfb5489d5a9906587aba0c29c2d4cb" exitCode=0
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.847435 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-74d77" event={"ID":"99c7c976-c996-4b9c-a389-fbaed3f71813","Type":"ContainerDied","Data":"a89270ffab1342367f2a43c0ce2a03e3b2cfb5489d5a9906587aba0c29c2d4cb"}
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.847470 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-74d77" event={"ID":"99c7c976-c996-4b9c-a389-fbaed3f71813","Type":"ContainerDied","Data":"d413efc388b920ce3776847e721235231909ab5db30b62e963244dce8957fdac"}
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.847494 4881 scope.go:117] "RemoveContainer" containerID="a89270ffab1342367f2a43c0ce2a03e3b2cfb5489d5a9906587aba0c29c2d4cb"
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.847690 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-74d77"
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.891661 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-74d77"]
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.907949 4881 scope.go:117] "RemoveContainer" containerID="0016d202d2ad187d252c55619cf31b1d41227d846eabdddcb2f567c6c4fc1195"
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.915598 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-74d77"]
Dec 11 10:06:55 crc kubenswrapper[4881]: I1211 10:06:55.949014 4881 scope.go:117] "RemoveContainer" containerID="6dd5d132b784d3186eace1f04b9c8bdf809f11d5763cfe20ec46f99469121fc9"
Dec 11 10:06:56 crc kubenswrapper[4881]: I1211 10:06:56.007863 4881 scope.go:117] "RemoveContainer" containerID="a89270ffab1342367f2a43c0ce2a03e3b2cfb5489d5a9906587aba0c29c2d4cb"
Dec 11 10:06:56 crc kubenswrapper[4881]: E1211 10:06:56.008235 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a89270ffab1342367f2a43c0ce2a03e3b2cfb5489d5a9906587aba0c29c2d4cb\": container with ID starting with a89270ffab1342367f2a43c0ce2a03e3b2cfb5489d5a9906587aba0c29c2d4cb not found: ID does not exist" containerID="a89270ffab1342367f2a43c0ce2a03e3b2cfb5489d5a9906587aba0c29c2d4cb"
Dec 11 10:06:56 crc kubenswrapper[4881]: I1211 10:06:56.008276 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a89270ffab1342367f2a43c0ce2a03e3b2cfb5489d5a9906587aba0c29c2d4cb"} err="failed to get container status \"a89270ffab1342367f2a43c0ce2a03e3b2cfb5489d5a9906587aba0c29c2d4cb\": rpc error: code = NotFound desc = could not find container \"a89270ffab1342367f2a43c0ce2a03e3b2cfb5489d5a9906587aba0c29c2d4cb\": container with ID starting with a89270ffab1342367f2a43c0ce2a03e3b2cfb5489d5a9906587aba0c29c2d4cb not found: ID does not exist"
Dec 11 10:06:56 crc kubenswrapper[4881]: I1211 10:06:56.008300 4881 scope.go:117] "RemoveContainer" containerID="0016d202d2ad187d252c55619cf31b1d41227d846eabdddcb2f567c6c4fc1195"
Dec 11 10:06:56 crc kubenswrapper[4881]: E1211 10:06:56.008642 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0016d202d2ad187d252c55619cf31b1d41227d846eabdddcb2f567c6c4fc1195\": container with ID starting with 0016d202d2ad187d252c55619cf31b1d41227d846eabdddcb2f567c6c4fc1195 not found: ID does not exist" containerID="0016d202d2ad187d252c55619cf31b1d41227d846eabdddcb2f567c6c4fc1195"
Dec 11 10:06:56 crc kubenswrapper[4881]: I1211 10:06:56.008681 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0016d202d2ad187d252c55619cf31b1d41227d846eabdddcb2f567c6c4fc1195"} err="failed to get container status \"0016d202d2ad187d252c55619cf31b1d41227d846eabdddcb2f567c6c4fc1195\": rpc error: code = NotFound desc = could not find container \"0016d202d2ad187d252c55619cf31b1d41227d846eabdddcb2f567c6c4fc1195\": container with ID starting with 0016d202d2ad187d252c55619cf31b1d41227d846eabdddcb2f567c6c4fc1195 not found: ID does not exist"
Dec 11 10:06:56 crc kubenswrapper[4881]: I1211 10:06:56.008705 4881 scope.go:117] "RemoveContainer" containerID="6dd5d132b784d3186eace1f04b9c8bdf809f11d5763cfe20ec46f99469121fc9"
Dec 11 10:06:56 crc kubenswrapper[4881]: E1211 10:06:56.009088 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6dd5d132b784d3186eace1f04b9c8bdf809f11d5763cfe20ec46f99469121fc9\": container with ID starting with 6dd5d132b784d3186eace1f04b9c8bdf809f11d5763cfe20ec46f99469121fc9 not found: ID does not exist" containerID="6dd5d132b784d3186eace1f04b9c8bdf809f11d5763cfe20ec46f99469121fc9"
Dec 11 10:06:56 crc kubenswrapper[4881]: I1211 10:06:56.009113 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6dd5d132b784d3186eace1f04b9c8bdf809f11d5763cfe20ec46f99469121fc9"} err="failed to get container status \"6dd5d132b784d3186eace1f04b9c8bdf809f11d5763cfe20ec46f99469121fc9\": rpc error: code = NotFound desc = could not find container \"6dd5d132b784d3186eace1f04b9c8bdf809f11d5763cfe20ec46f99469121fc9\": container with ID starting with 6dd5d132b784d3186eace1f04b9c8bdf809f11d5763cfe20ec46f99469121fc9 not found: ID does not exist"
Dec 11 10:06:57 crc kubenswrapper[4881]: I1211 10:06:57.020978 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" path="/var/lib/kubelet/pods/99c7c976-c996-4b9c-a389-fbaed3f71813/volumes"
Dec 11 10:06:57 crc kubenswrapper[4881]: I1211 10:06:57.654103 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tl8kp"]
Dec 11 10:06:57 crc kubenswrapper[4881]: I1211 10:06:57.654720 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tl8kp" podUID="b9e52945-cd55-4085-80b8-e4c7418ebe7b" containerName="registry-server" containerID="cri-o://0ddb2b4c09d18b41e0d985e70a4340bf60aa329936e2ddb9d4803e158b2dc556" gracePeriod=2
Dec 11 10:06:57 crc kubenswrapper[4881]: I1211 10:06:57.875379 4881 generic.go:334] "Generic (PLEG): container finished" podID="b9e52945-cd55-4085-80b8-e4c7418ebe7b" containerID="0ddb2b4c09d18b41e0d985e70a4340bf60aa329936e2ddb9d4803e158b2dc556" exitCode=0
Dec 11 10:06:57 crc kubenswrapper[4881]: I1211 10:06:57.875435 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tl8kp" event={"ID":"b9e52945-cd55-4085-80b8-e4c7418ebe7b","Type":"ContainerDied","Data":"0ddb2b4c09d18b41e0d985e70a4340bf60aa329936e2ddb9d4803e158b2dc556"}
Dec 11 10:06:58 crc kubenswrapper[4881]: I1211 10:06:58.267748 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tl8kp"
Dec 11 10:06:58 crc kubenswrapper[4881]: I1211 10:06:58.406913 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9e52945-cd55-4085-80b8-e4c7418ebe7b-catalog-content\") pod \"b9e52945-cd55-4085-80b8-e4c7418ebe7b\" (UID: \"b9e52945-cd55-4085-80b8-e4c7418ebe7b\") "
Dec 11 10:06:58 crc kubenswrapper[4881]: I1211 10:06:58.407628 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9e52945-cd55-4085-80b8-e4c7418ebe7b-utilities" (OuterVolumeSpecName: "utilities") pod "b9e52945-cd55-4085-80b8-e4c7418ebe7b" (UID: "b9e52945-cd55-4085-80b8-e4c7418ebe7b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:06:58 crc kubenswrapper[4881]: I1211 10:06:58.408174 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9e52945-cd55-4085-80b8-e4c7418ebe7b-utilities\") pod \"b9e52945-cd55-4085-80b8-e4c7418ebe7b\" (UID: \"b9e52945-cd55-4085-80b8-e4c7418ebe7b\") "
Dec 11 10:06:58 crc kubenswrapper[4881]: I1211 10:06:58.408416 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfs5z\" (UniqueName: \"kubernetes.io/projected/b9e52945-cd55-4085-80b8-e4c7418ebe7b-kube-api-access-wfs5z\") pod \"b9e52945-cd55-4085-80b8-e4c7418ebe7b\" (UID: \"b9e52945-cd55-4085-80b8-e4c7418ebe7b\") "
Dec 11 10:06:58 crc kubenswrapper[4881]: I1211 10:06:58.410022 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9e52945-cd55-4085-80b8-e4c7418ebe7b-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 10:06:58 crc kubenswrapper[4881]: I1211 10:06:58.416609 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9e52945-cd55-4085-80b8-e4c7418ebe7b-kube-api-access-wfs5z" (OuterVolumeSpecName: "kube-api-access-wfs5z") pod "b9e52945-cd55-4085-80b8-e4c7418ebe7b" (UID: "b9e52945-cd55-4085-80b8-e4c7418ebe7b"). InnerVolumeSpecName "kube-api-access-wfs5z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:06:58 crc kubenswrapper[4881]: I1211 10:06:58.429778 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9e52945-cd55-4085-80b8-e4c7418ebe7b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b9e52945-cd55-4085-80b8-e4c7418ebe7b" (UID: "b9e52945-cd55-4085-80b8-e4c7418ebe7b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:06:58 crc kubenswrapper[4881]: I1211 10:06:58.511903 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfs5z\" (UniqueName: \"kubernetes.io/projected/b9e52945-cd55-4085-80b8-e4c7418ebe7b-kube-api-access-wfs5z\") on node \"crc\" DevicePath \"\""
Dec 11 10:06:58 crc kubenswrapper[4881]: I1211 10:06:58.511938 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9e52945-cd55-4085-80b8-e4c7418ebe7b-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 10:06:58 crc kubenswrapper[4881]: I1211 10:06:58.890897 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tl8kp" event={"ID":"b9e52945-cd55-4085-80b8-e4c7418ebe7b","Type":"ContainerDied","Data":"3ee42c9c154c41cc8483809eef10a7505e4a14e2829ba354977c993b1d0dd4b0"}
Dec 11 10:06:58 crc kubenswrapper[4881]: I1211 10:06:58.890953 4881 scope.go:117] "RemoveContainer" containerID="0ddb2b4c09d18b41e0d985e70a4340bf60aa329936e2ddb9d4803e158b2dc556"
Dec 11 10:06:58 crc kubenswrapper[4881]: I1211 10:06:58.891090 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tl8kp"
Dec 11 10:06:58 crc kubenswrapper[4881]: I1211 10:06:58.932047 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tl8kp"]
Dec 11 10:06:58 crc kubenswrapper[4881]: I1211 10:06:58.933046 4881 scope.go:117] "RemoveContainer" containerID="7d3e89a2075b64896d813284eafd1841d6dc460fa6902c6183ee2467f616bf5e"
Dec 11 10:06:58 crc kubenswrapper[4881]: I1211 10:06:58.945582 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tl8kp"]
Dec 11 10:06:58 crc kubenswrapper[4881]: I1211 10:06:58.959966 4881 scope.go:117] "RemoveContainer" containerID="797849f855287f55e4047a4d11352a6b5a4fe856e9b67fe25fa1bfcb35196782"
Dec 11 10:06:59 crc kubenswrapper[4881]: I1211 10:06:59.017895 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9e52945-cd55-4085-80b8-e4c7418ebe7b" path="/var/lib/kubelet/pods/b9e52945-cd55-4085-80b8-e4c7418ebe7b/volumes"
Dec 11 10:07:02 crc kubenswrapper[4881]: I1211 10:07:02.006112 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:07:02 crc kubenswrapper[4881]: E1211 10:07:02.007175 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 10:07:06 crc kubenswrapper[4881]: I1211 10:07:06.006197 4881 generic.go:334] "Generic (PLEG): container finished" podID="8c355bf0-389d-4d9a-a9b3-e4b2856b0bec" containerID="98b44f760930dd1abfd374422c1eda877139256e8111b6b1442ce6dcb08117ae" exitCode=0
Dec 11 10:07:06 crc kubenswrapper[4881]: I1211 10:07:06.006521 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6ndjj/must-gather-q79pd" event={"ID":"8c355bf0-389d-4d9a-a9b3-e4b2856b0bec","Type":"ContainerDied","Data":"98b44f760930dd1abfd374422c1eda877139256e8111b6b1442ce6dcb08117ae"}
Dec 11 10:07:06 crc kubenswrapper[4881]: I1211 10:07:06.007513 4881 scope.go:117] "RemoveContainer" containerID="98b44f760930dd1abfd374422c1eda877139256e8111b6b1442ce6dcb08117ae"
Dec 11 10:07:06 crc kubenswrapper[4881]: I1211 10:07:06.520619 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6ndjj_must-gather-q79pd_8c355bf0-389d-4d9a-a9b3-e4b2856b0bec/gather/0.log"
Dec 11 10:07:14 crc kubenswrapper[4881]: I1211 10:07:14.121328 4881 scope.go:117] "RemoveContainer" containerID="58d0d316cd4cc921f5269a28c9e8243f103e11a9dd54777c42d900a6c589f5ab"
Dec 11 10:07:15 crc kubenswrapper[4881]: I1211 10:07:15.006142 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:07:15 crc kubenswrapper[4881]: E1211 10:07:15.006858 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 10:07:16 crc kubenswrapper[4881]: I1211 10:07:16.120657 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6ndjj/must-gather-q79pd"]
Dec 11 10:07:16 crc kubenswrapper[4881]: I1211 10:07:16.121013 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-6ndjj/must-gather-q79pd" podUID="8c355bf0-389d-4d9a-a9b3-e4b2856b0bec" containerName="copy" containerID="cri-o://c195c7f5efedded7ed78f8b661299b794546b44e285d8eaf71b56f319401168b" gracePeriod=2
Dec 11 10:07:16 crc kubenswrapper[4881]: I1211 10:07:16.141208 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6ndjj/must-gather-q79pd"]
Dec 11 10:07:16 crc kubenswrapper[4881]: I1211 10:07:16.702882 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6ndjj_must-gather-q79pd_8c355bf0-389d-4d9a-a9b3-e4b2856b0bec/copy/0.log"
Dec 11 10:07:16 crc kubenswrapper[4881]: I1211 10:07:16.703610 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6ndjj/must-gather-q79pd"
Dec 11 10:07:16 crc kubenswrapper[4881]: I1211 10:07:16.811032 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vhvfr\" (UniqueName: \"kubernetes.io/projected/8c355bf0-389d-4d9a-a9b3-e4b2856b0bec-kube-api-access-vhvfr\") pod \"8c355bf0-389d-4d9a-a9b3-e4b2856b0bec\" (UID: \"8c355bf0-389d-4d9a-a9b3-e4b2856b0bec\") "
Dec 11 10:07:16 crc kubenswrapper[4881]: I1211 10:07:16.811122 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/8c355bf0-389d-4d9a-a9b3-e4b2856b0bec-must-gather-output\") pod \"8c355bf0-389d-4d9a-a9b3-e4b2856b0bec\" (UID: \"8c355bf0-389d-4d9a-a9b3-e4b2856b0bec\") "
Dec 11 10:07:16 crc kubenswrapper[4881]: I1211 10:07:16.821726 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c355bf0-389d-4d9a-a9b3-e4b2856b0bec-kube-api-access-vhvfr" (OuterVolumeSpecName: "kube-api-access-vhvfr") pod "8c355bf0-389d-4d9a-a9b3-e4b2856b0bec" (UID: "8c355bf0-389d-4d9a-a9b3-e4b2856b0bec"). InnerVolumeSpecName "kube-api-access-vhvfr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:07:16 crc kubenswrapper[4881]: I1211 10:07:16.914758 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vhvfr\" (UniqueName: \"kubernetes.io/projected/8c355bf0-389d-4d9a-a9b3-e4b2856b0bec-kube-api-access-vhvfr\") on node \"crc\" DevicePath \"\""
Dec 11 10:07:17 crc kubenswrapper[4881]: I1211 10:07:17.007012 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c355bf0-389d-4d9a-a9b3-e4b2856b0bec-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "8c355bf0-389d-4d9a-a9b3-e4b2856b0bec" (UID: "8c355bf0-389d-4d9a-a9b3-e4b2856b0bec"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:07:17 crc kubenswrapper[4881]: I1211 10:07:17.017389 4881 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/8c355bf0-389d-4d9a-a9b3-e4b2856b0bec-must-gather-output\") on node \"crc\" DevicePath \"\""
Dec 11 10:07:17 crc kubenswrapper[4881]: I1211 10:07:17.024520 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c355bf0-389d-4d9a-a9b3-e4b2856b0bec" path="/var/lib/kubelet/pods/8c355bf0-389d-4d9a-a9b3-e4b2856b0bec/volumes"
Dec 11 10:07:17 crc kubenswrapper[4881]: I1211 10:07:17.136496 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6ndjj_must-gather-q79pd_8c355bf0-389d-4d9a-a9b3-e4b2856b0bec/copy/0.log"
Dec 11 10:07:17 crc kubenswrapper[4881]: I1211 10:07:17.136931 4881 generic.go:334] "Generic (PLEG): container finished" podID="8c355bf0-389d-4d9a-a9b3-e4b2856b0bec" containerID="c195c7f5efedded7ed78f8b661299b794546b44e285d8eaf71b56f319401168b" exitCode=143
Dec 11 10:07:17 crc kubenswrapper[4881]: I1211 10:07:17.136986 4881 scope.go:117] "RemoveContainer" containerID="c195c7f5efedded7ed78f8b661299b794546b44e285d8eaf71b56f319401168b"
Dec 11 10:07:17 crc kubenswrapper[4881]: I1211 10:07:17.137004 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6ndjj/must-gather-q79pd"
Dec 11 10:07:17 crc kubenswrapper[4881]: I1211 10:07:17.167190 4881 scope.go:117] "RemoveContainer" containerID="98b44f760930dd1abfd374422c1eda877139256e8111b6b1442ce6dcb08117ae"
Dec 11 10:07:17 crc kubenswrapper[4881]: I1211 10:07:17.221775 4881 scope.go:117] "RemoveContainer" containerID="c195c7f5efedded7ed78f8b661299b794546b44e285d8eaf71b56f319401168b"
Dec 11 10:07:17 crc kubenswrapper[4881]: E1211 10:07:17.222322 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c195c7f5efedded7ed78f8b661299b794546b44e285d8eaf71b56f319401168b\": container with ID starting with c195c7f5efedded7ed78f8b661299b794546b44e285d8eaf71b56f319401168b not found: ID does not exist" containerID="c195c7f5efedded7ed78f8b661299b794546b44e285d8eaf71b56f319401168b"
Dec 11 10:07:17 crc kubenswrapper[4881]: I1211 10:07:17.222396 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c195c7f5efedded7ed78f8b661299b794546b44e285d8eaf71b56f319401168b"} err="failed to get container status \"c195c7f5efedded7ed78f8b661299b794546b44e285d8eaf71b56f319401168b\": rpc error: code = NotFound desc = could not find container \"c195c7f5efedded7ed78f8b661299b794546b44e285d8eaf71b56f319401168b\": container with ID starting with c195c7f5efedded7ed78f8b661299b794546b44e285d8eaf71b56f319401168b not found: ID does not exist"
Dec 11 10:07:17 crc kubenswrapper[4881]: I1211 10:07:17.222429 4881 scope.go:117] "RemoveContainer" containerID="98b44f760930dd1abfd374422c1eda877139256e8111b6b1442ce6dcb08117ae"
Dec 11 10:07:17 crc kubenswrapper[4881]: E1211 10:07:17.222820 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98b44f760930dd1abfd374422c1eda877139256e8111b6b1442ce6dcb08117ae\": container with ID starting with 98b44f760930dd1abfd374422c1eda877139256e8111b6b1442ce6dcb08117ae not found: ID does not exist" containerID="98b44f760930dd1abfd374422c1eda877139256e8111b6b1442ce6dcb08117ae"
Dec 11 10:07:17 crc kubenswrapper[4881]: I1211 10:07:17.222844 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98b44f760930dd1abfd374422c1eda877139256e8111b6b1442ce6dcb08117ae"} err="failed to get container status \"98b44f760930dd1abfd374422c1eda877139256e8111b6b1442ce6dcb08117ae\": rpc error: code = NotFound desc = could not find container \"98b44f760930dd1abfd374422c1eda877139256e8111b6b1442ce6dcb08117ae\": container with ID starting with 98b44f760930dd1abfd374422c1eda877139256e8111b6b1442ce6dcb08117ae not found: ID does not exist"
Dec 11 10:07:30 crc kubenswrapper[4881]: I1211 10:07:30.005760 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:07:30 crc kubenswrapper[4881]: E1211 10:07:30.006693 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 10:07:44 crc kubenswrapper[4881]: I1211 10:07:44.006227 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:07:44 crc kubenswrapper[4881]: E1211 10:07:44.007263 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 10:07:58 crc kubenswrapper[4881]: I1211 10:07:58.005271 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:07:58 crc kubenswrapper[4881]: E1211 10:07:58.006016 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 10:08:12 crc kubenswrapper[4881]: I1211 10:08:12.005717 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:08:12 crc kubenswrapper[4881]: E1211 10:08:12.006518 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 10:08:27 crc kubenswrapper[4881]: I1211 10:08:27.007481 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:08:27 crc kubenswrapper[4881]: E1211 10:08:27.008953 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 10:08:40 crc kubenswrapper[4881]: I1211 10:08:40.005678 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:08:40 crc kubenswrapper[4881]: E1211 10:08:40.006416 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 10:08:51 crc kubenswrapper[4881]: I1211 10:08:51.006216 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:08:51 crc kubenswrapper[4881]: E1211 10:08:51.007009 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 10:09:05 crc kubenswrapper[4881]: I1211 10:09:05.005821 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:09:05 crc kubenswrapper[4881]: E1211 10:09:05.006769 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 10:09:20 crc kubenswrapper[4881]: I1211 10:09:20.006479 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:09:20 crc kubenswrapper[4881]: E1211 10:09:20.007612 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 10:09:33 crc kubenswrapper[4881]: I1211 10:09:33.016489 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:09:33 crc kubenswrapper[4881]: E1211 10:09:33.017593 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 10:09:45 crc kubenswrapper[4881]: I1211 10:09:45.006572 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:09:45 crc kubenswrapper[4881]: E1211 10:09:45.007400 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 10:10:00 crc kubenswrapper[4881]: I1211 10:10:00.005916 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:10:00 crc kubenswrapper[4881]: E1211 10:10:00.006958 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 10:10:15 crc kubenswrapper[4881]: I1211 10:10:15.006065 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:10:15 crc kubenswrapper[4881]: E1211 10:10:15.007093 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 10:10:27 crc kubenswrapper[4881]: I1211 10:10:27.005559 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:10:27 crc kubenswrapper[4881]: E1211 10:10:27.006408 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.832835 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rwlhh/must-gather-n5brg"]
Dec 11 10:10:31 crc kubenswrapper[4881]: E1211 10:10:31.835284 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c355bf0-389d-4d9a-a9b3-e4b2856b0bec" containerName="copy"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.835456 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c355bf0-389d-4d9a-a9b3-e4b2856b0bec" containerName="copy"
Dec 11 10:10:31 crc kubenswrapper[4881]: E1211 10:10:31.835599 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" containerName="extract-utilities"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.835705 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" containerName="extract-utilities"
Dec 11 10:10:31 crc kubenswrapper[4881]: E1211 10:10:31.835825 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9e52945-cd55-4085-80b8-e4c7418ebe7b" containerName="extract-utilities"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.835937 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9e52945-cd55-4085-80b8-e4c7418ebe7b" containerName="extract-utilities"
Dec 11 10:10:31 crc kubenswrapper[4881]: E1211 10:10:31.836073 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c355bf0-389d-4d9a-a9b3-e4b2856b0bec" containerName="gather"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.836178 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c355bf0-389d-4d9a-a9b3-e4b2856b0bec" containerName="gather"
Dec 11 10:10:31 crc kubenswrapper[4881]: E1211 10:10:31.836285 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d504e4dc-abb2-435a-ad46-09acdda9d84b" containerName="extract-utilities"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.836389 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="d504e4dc-abb2-435a-ad46-09acdda9d84b" containerName="extract-utilities"
Dec 11 10:10:31 crc kubenswrapper[4881]: E1211 10:10:31.836497 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" containerName="registry-server"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.836594 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" containerName="registry-server"
Dec 11 10:10:31 crc kubenswrapper[4881]: E1211 10:10:31.836695 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" containerName="extract-content"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.836795 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" containerName="extract-content"
Dec 11 10:10:31 crc kubenswrapper[4881]: E1211 10:10:31.837008 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9e52945-cd55-4085-80b8-e4c7418ebe7b" containerName="registry-server"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.837116 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9e52945-cd55-4085-80b8-e4c7418ebe7b" containerName="registry-server"
Dec 11 10:10:31 crc kubenswrapper[4881]: E1211 10:10:31.837229 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9e52945-cd55-4085-80b8-e4c7418ebe7b" containerName="extract-content"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.837347 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9e52945-cd55-4085-80b8-e4c7418ebe7b" containerName="extract-content"
Dec 11 10:10:31 crc kubenswrapper[4881]: E1211 10:10:31.837461 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d504e4dc-abb2-435a-ad46-09acdda9d84b" containerName="extract-content"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.837574 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="d504e4dc-abb2-435a-ad46-09acdda9d84b" containerName="extract-content"
Dec 11 10:10:31 crc kubenswrapper[4881]: E1211 10:10:31.837702 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d504e4dc-abb2-435a-ad46-09acdda9d84b" containerName="registry-server"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.837811 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="d504e4dc-abb2-435a-ad46-09acdda9d84b" containerName="registry-server"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.838329 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c355bf0-389d-4d9a-a9b3-e4b2856b0bec" containerName="gather"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.838488 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c355bf0-389d-4d9a-a9b3-e4b2856b0bec" containerName="copy"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.838588 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9e52945-cd55-4085-80b8-e4c7418ebe7b" containerName="registry-server"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.838695 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="d504e4dc-abb2-435a-ad46-09acdda9d84b" containerName="registry-server"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.838808 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="99c7c976-c996-4b9c-a389-fbaed3f71813" containerName="registry-server"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.844175 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rwlhh/must-gather-n5brg"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.849915 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-rwlhh"/"kube-root-ca.crt"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.849919 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-rwlhh"/"openshift-service-ca.crt"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.866856 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-rwlhh/must-gather-n5brg"]
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.995276 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/44db5ba3-cedf-4bbb-a754-6c51ca4b42a7-must-gather-output\") pod \"must-gather-n5brg\" (UID: \"44db5ba3-cedf-4bbb-a754-6c51ca4b42a7\") " pod="openshift-must-gather-rwlhh/must-gather-n5brg"
Dec 11 10:10:31 crc kubenswrapper[4881]: I1211 10:10:31.995852 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmj9k\" (UniqueName: \"kubernetes.io/projected/44db5ba3-cedf-4bbb-a754-6c51ca4b42a7-kube-api-access-qmj9k\") pod \"must-gather-n5brg\" (UID: \"44db5ba3-cedf-4bbb-a754-6c51ca4b42a7\") " pod="openshift-must-gather-rwlhh/must-gather-n5brg"
Dec 11 10:10:32 crc kubenswrapper[4881]: I1211 10:10:32.098457 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmj9k\" (UniqueName: \"kubernetes.io/projected/44db5ba3-cedf-4bbb-a754-6c51ca4b42a7-kube-api-access-qmj9k\") pod \"must-gather-n5brg\" (UID: \"44db5ba3-cedf-4bbb-a754-6c51ca4b42a7\") " pod="openshift-must-gather-rwlhh/must-gather-n5brg"
Dec 11 10:10:32 crc kubenswrapper[4881]: I1211 10:10:32.098612 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/44db5ba3-cedf-4bbb-a754-6c51ca4b42a7-must-gather-output\") pod \"must-gather-n5brg\" (UID: \"44db5ba3-cedf-4bbb-a754-6c51ca4b42a7\") " pod="openshift-must-gather-rwlhh/must-gather-n5brg"
Dec 11 10:10:32 crc kubenswrapper[4881]: I1211 10:10:32.099303 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/44db5ba3-cedf-4bbb-a754-6c51ca4b42a7-must-gather-output\") pod \"must-gather-n5brg\" (UID: \"44db5ba3-cedf-4bbb-a754-6c51ca4b42a7\") " pod="openshift-must-gather-rwlhh/must-gather-n5brg"
Dec 11 10:10:32 crc kubenswrapper[4881]: I1211 10:10:32.115908 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmj9k\" (UniqueName: \"kubernetes.io/projected/44db5ba3-cedf-4bbb-a754-6c51ca4b42a7-kube-api-access-qmj9k\") pod \"must-gather-n5brg\" (UID: \"44db5ba3-cedf-4bbb-a754-6c51ca4b42a7\") " pod="openshift-must-gather-rwlhh/must-gather-n5brg"
Dec 11 10:10:32 crc kubenswrapper[4881]: I1211 10:10:32.165903 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rwlhh/must-gather-n5brg"
Dec 11 10:10:32 crc kubenswrapper[4881]: I1211 10:10:32.687129 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-rwlhh/must-gather-n5brg"]
Dec 11 10:10:33 crc kubenswrapper[4881]: I1211 10:10:33.614810 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rwlhh/must-gather-n5brg" event={"ID":"44db5ba3-cedf-4bbb-a754-6c51ca4b42a7","Type":"ContainerStarted","Data":"aeaf4cc765f3edd2da56fbbb122adad53d5f4346c6bc8090f62732e8deb96834"}
Dec 11 10:10:33 crc kubenswrapper[4881]: I1211 10:10:33.616099 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rwlhh/must-gather-n5brg" event={"ID":"44db5ba3-cedf-4bbb-a754-6c51ca4b42a7","Type":"ContainerStarted","Data":"c594583f26f1a7905e8575575884170a039c243b84a9ae6093eeeffb24859d2c"}
Dec 11 10:10:33 crc kubenswrapper[4881]: I1211 10:10:33.616166 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rwlhh/must-gather-n5brg" event={"ID":"44db5ba3-cedf-4bbb-a754-6c51ca4b42a7","Type":"ContainerStarted","Data":"0a671976288df7f1dfae2fc0732db84cfdccb70d22b383edfaa9b22428625968"}
Dec 11 10:10:33 crc kubenswrapper[4881]: I1211 10:10:33.630654 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-rwlhh/must-gather-n5brg" podStartSLOduration=2.630623474 podStartE2EDuration="2.630623474s" podCreationTimestamp="2025-12-11 10:10:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:10:33.62804445 +0000 UTC m=+6882.005413137" watchObservedRunningTime="2025-12-11 10:10:33.630623474 +0000 UTC m=+6882.007992171"
Dec 11 10:10:36 crc kubenswrapper[4881]: E1211 10:10:36.438709 4881 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.20:49450->38.102.83.20:39683: write tcp 38.102.83.20:49450->38.102.83.20:39683: write: broken pipe
Dec 11 10:10:37 crc kubenswrapper[4881]: I1211 10:10:37.139941 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rwlhh/crc-debug-prlcq"]
Dec 11 10:10:37 crc kubenswrapper[4881]: I1211 10:10:37.141524 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rwlhh/crc-debug-prlcq"
Dec 11 10:10:37 crc kubenswrapper[4881]: I1211 10:10:37.143935 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-rwlhh"/"default-dockercfg-dsftk"
Dec 11 10:10:37 crc kubenswrapper[4881]: I1211 10:10:37.237654 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0790cc73-de85-4bc9-ae4d-46b612ec4fe6-host\") pod \"crc-debug-prlcq\" (UID: \"0790cc73-de85-4bc9-ae4d-46b612ec4fe6\") " pod="openshift-must-gather-rwlhh/crc-debug-prlcq"
Dec 11 10:10:37 crc kubenswrapper[4881]: I1211 10:10:37.238106 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqrbm\" (UniqueName: \"kubernetes.io/projected/0790cc73-de85-4bc9-ae4d-46b612ec4fe6-kube-api-access-qqrbm\") pod \"crc-debug-prlcq\" (UID: \"0790cc73-de85-4bc9-ae4d-46b612ec4fe6\") " pod="openshift-must-gather-rwlhh/crc-debug-prlcq"
Dec 11 10:10:37 crc kubenswrapper[4881]: I1211 10:10:37.340398 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqrbm\" (UniqueName: \"kubernetes.io/projected/0790cc73-de85-4bc9-ae4d-46b612ec4fe6-kube-api-access-qqrbm\") pod \"crc-debug-prlcq\" (UID: \"0790cc73-de85-4bc9-ae4d-46b612ec4fe6\") " pod="openshift-must-gather-rwlhh/crc-debug-prlcq"
Dec 11 10:10:37 crc kubenswrapper[4881]: I1211 10:10:37.340521 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0790cc73-de85-4bc9-ae4d-46b612ec4fe6-host\") pod \"crc-debug-prlcq\" (UID: \"0790cc73-de85-4bc9-ae4d-46b612ec4fe6\") " pod="openshift-must-gather-rwlhh/crc-debug-prlcq"
Dec 11 10:10:37 crc kubenswrapper[4881]: I1211 10:10:37.341261 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0790cc73-de85-4bc9-ae4d-46b612ec4fe6-host\") pod \"crc-debug-prlcq\" (UID: \"0790cc73-de85-4bc9-ae4d-46b612ec4fe6\") " pod="openshift-must-gather-rwlhh/crc-debug-prlcq"
Dec 11 10:10:37 crc kubenswrapper[4881]: I1211 10:10:37.372403 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqrbm\" (UniqueName: \"kubernetes.io/projected/0790cc73-de85-4bc9-ae4d-46b612ec4fe6-kube-api-access-qqrbm\") pod \"crc-debug-prlcq\" (UID: \"0790cc73-de85-4bc9-ae4d-46b612ec4fe6\") " pod="openshift-must-gather-rwlhh/crc-debug-prlcq"
Dec 11 10:10:37 crc kubenswrapper[4881]: I1211 10:10:37.462864 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rwlhh/crc-debug-prlcq"
Dec 11 10:10:37 crc kubenswrapper[4881]: I1211 10:10:37.657403 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rwlhh/crc-debug-prlcq" event={"ID":"0790cc73-de85-4bc9-ae4d-46b612ec4fe6","Type":"ContainerStarted","Data":"ff92299b71ab5fe52502fab86e369b282808b9e85b00d2f6238ce2f268955ccc"}
Dec 11 10:10:38 crc kubenswrapper[4881]: I1211 10:10:38.670540 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rwlhh/crc-debug-prlcq" event={"ID":"0790cc73-de85-4bc9-ae4d-46b612ec4fe6","Type":"ContainerStarted","Data":"ca98e35e02a7064669e58d4e15ae64f70f12576d3a9730128606e7d8381d11ab"}
Dec 11 10:10:38 crc kubenswrapper[4881]: I1211 10:10:38.684300 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-rwlhh/crc-debug-prlcq" podStartSLOduration=1.6842790939999999 podStartE2EDuration="1.684279094s" podCreationTimestamp="2025-12-11 10:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:10:38.681217808 +0000 UTC m=+6887.058586515" watchObservedRunningTime="2025-12-11 10:10:38.684279094 +0000 UTC m=+6887.061647791"
Dec 11 10:10:40 crc kubenswrapper[4881]: I1211 10:10:40.006042 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:10:40 crc kubenswrapper[4881]: I1211 10:10:40.696022 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"7e4db498eb5765963dae9ad2606e02f73e963ed76b1fbfa5ecaf703742a6a415"}
Dec 11 10:11:23 crc kubenswrapper[4881]: I1211 10:11:23.124903 4881 generic.go:334] "Generic (PLEG): container finished" podID="0790cc73-de85-4bc9-ae4d-46b612ec4fe6" containerID="ca98e35e02a7064669e58d4e15ae64f70f12576d3a9730128606e7d8381d11ab" exitCode=0
Dec 11 10:11:23 crc kubenswrapper[4881]: I1211 10:11:23.124987 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rwlhh/crc-debug-prlcq" event={"ID":"0790cc73-de85-4bc9-ae4d-46b612ec4fe6","Type":"ContainerDied","Data":"ca98e35e02a7064669e58d4e15ae64f70f12576d3a9730128606e7d8381d11ab"}
Dec 11 10:11:24 crc kubenswrapper[4881]: I1211 10:11:24.302177 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rwlhh/crc-debug-prlcq"
Dec 11 10:11:24 crc kubenswrapper[4881]: I1211 10:11:24.351452 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rwlhh/crc-debug-prlcq"]
Dec 11 10:11:24 crc kubenswrapper[4881]: I1211 10:11:24.358883 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0790cc73-de85-4bc9-ae4d-46b612ec4fe6-host\") pod \"0790cc73-de85-4bc9-ae4d-46b612ec4fe6\" (UID: \"0790cc73-de85-4bc9-ae4d-46b612ec4fe6\") "
Dec 11 10:11:24 crc kubenswrapper[4881]: I1211 10:11:24.358994 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqrbm\" (UniqueName: \"kubernetes.io/projected/0790cc73-de85-4bc9-ae4d-46b612ec4fe6-kube-api-access-qqrbm\") pod \"0790cc73-de85-4bc9-ae4d-46b612ec4fe6\" (UID: \"0790cc73-de85-4bc9-ae4d-46b612ec4fe6\") "
Dec 11 10:11:24 crc kubenswrapper[4881]: I1211 10:11:24.359663 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0790cc73-de85-4bc9-ae4d-46b612ec4fe6-host" (OuterVolumeSpecName: "host") pod "0790cc73-de85-4bc9-ae4d-46b612ec4fe6" (UID: "0790cc73-de85-4bc9-ae4d-46b612ec4fe6"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 11 10:11:24 crc kubenswrapper[4881]: I1211 10:11:24.360263 4881 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0790cc73-de85-4bc9-ae4d-46b612ec4fe6-host\") on node \"crc\" DevicePath \"\""
Dec 11 10:11:24 crc kubenswrapper[4881]: I1211 10:11:24.364042 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rwlhh/crc-debug-prlcq"]
Dec 11 10:11:24 crc kubenswrapper[4881]: I1211 10:11:24.380669 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0790cc73-de85-4bc9-ae4d-46b612ec4fe6-kube-api-access-qqrbm" (OuterVolumeSpecName: "kube-api-access-qqrbm") pod "0790cc73-de85-4bc9-ae4d-46b612ec4fe6" (UID: "0790cc73-de85-4bc9-ae4d-46b612ec4fe6"). InnerVolumeSpecName "kube-api-access-qqrbm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:11:24 crc kubenswrapper[4881]: I1211 10:11:24.462832 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qqrbm\" (UniqueName: \"kubernetes.io/projected/0790cc73-de85-4bc9-ae4d-46b612ec4fe6-kube-api-access-qqrbm\") on node \"crc\" DevicePath \"\""
Dec 11 10:11:25 crc kubenswrapper[4881]: I1211 10:11:25.022298 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0790cc73-de85-4bc9-ae4d-46b612ec4fe6" path="/var/lib/kubelet/pods/0790cc73-de85-4bc9-ae4d-46b612ec4fe6/volumes"
Dec 11 10:11:25 crc kubenswrapper[4881]: I1211 10:11:25.169045 4881 scope.go:117] "RemoveContainer" containerID="ca98e35e02a7064669e58d4e15ae64f70f12576d3a9730128606e7d8381d11ab"
Dec 11 10:11:25 crc kubenswrapper[4881]: I1211 10:11:25.169184 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rwlhh/crc-debug-prlcq"
Dec 11 10:11:25 crc kubenswrapper[4881]: I1211 10:11:25.557371 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rwlhh/crc-debug-cs55t"]
Dec 11 10:11:25 crc kubenswrapper[4881]: E1211 10:11:25.558670 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0790cc73-de85-4bc9-ae4d-46b612ec4fe6" containerName="container-00"
Dec 11 10:11:25 crc kubenswrapper[4881]: I1211 10:11:25.558748 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="0790cc73-de85-4bc9-ae4d-46b612ec4fe6" containerName="container-00"
Dec 11 10:11:25 crc kubenswrapper[4881]: I1211 10:11:25.559038 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="0790cc73-de85-4bc9-ae4d-46b612ec4fe6" containerName="container-00"
Dec 11 10:11:25 crc kubenswrapper[4881]: I1211 10:11:25.560236 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rwlhh/crc-debug-cs55t"
Dec 11 10:11:25 crc kubenswrapper[4881]: I1211 10:11:25.562512 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-rwlhh"/"default-dockercfg-dsftk"
Dec 11 10:11:25 crc kubenswrapper[4881]: I1211 10:11:25.591191 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nv64x\" (UniqueName: \"kubernetes.io/projected/15f24c58-7bae-48d2-80da-7e21cc8ad514-kube-api-access-nv64x\") pod \"crc-debug-cs55t\" (UID: \"15f24c58-7bae-48d2-80da-7e21cc8ad514\") " pod="openshift-must-gather-rwlhh/crc-debug-cs55t"
Dec 11 10:11:25 crc kubenswrapper[4881]: I1211 10:11:25.591266 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/15f24c58-7bae-48d2-80da-7e21cc8ad514-host\") pod \"crc-debug-cs55t\" (UID: \"15f24c58-7bae-48d2-80da-7e21cc8ad514\") " pod="openshift-must-gather-rwlhh/crc-debug-cs55t"
Dec 11 10:11:25 crc kubenswrapper[4881]: I1211 10:11:25.693489 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nv64x\" (UniqueName: \"kubernetes.io/projected/15f24c58-7bae-48d2-80da-7e21cc8ad514-kube-api-access-nv64x\") pod \"crc-debug-cs55t\" (UID: \"15f24c58-7bae-48d2-80da-7e21cc8ad514\") " pod="openshift-must-gather-rwlhh/crc-debug-cs55t"
Dec 11 10:11:25 crc kubenswrapper[4881]: I1211 10:11:25.693793 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/15f24c58-7bae-48d2-80da-7e21cc8ad514-host\") pod \"crc-debug-cs55t\" (UID: \"15f24c58-7bae-48d2-80da-7e21cc8ad514\") " pod="openshift-must-gather-rwlhh/crc-debug-cs55t"
Dec 11 10:11:25 crc kubenswrapper[4881]: I1211 10:11:25.694190 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/15f24c58-7bae-48d2-80da-7e21cc8ad514-host\") pod \"crc-debug-cs55t\" (UID: \"15f24c58-7bae-48d2-80da-7e21cc8ad514\") " pod="openshift-must-gather-rwlhh/crc-debug-cs55t"
Dec 11 10:11:25 crc kubenswrapper[4881]: I1211 10:11:25.717465 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nv64x\" (UniqueName: \"kubernetes.io/projected/15f24c58-7bae-48d2-80da-7e21cc8ad514-kube-api-access-nv64x\") pod \"crc-debug-cs55t\" (UID: \"15f24c58-7bae-48d2-80da-7e21cc8ad514\") " pod="openshift-must-gather-rwlhh/crc-debug-cs55t"
Dec 11 10:11:25 crc kubenswrapper[4881]: I1211 10:11:25.877077 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rwlhh/crc-debug-cs55t"
Dec 11 10:11:26 crc kubenswrapper[4881]: I1211 10:11:26.179860 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rwlhh/crc-debug-cs55t" event={"ID":"15f24c58-7bae-48d2-80da-7e21cc8ad514","Type":"ContainerStarted","Data":"0fa734473447919459f8267128b9d701bb60d2d3a2b921ee7665ce70bce67459"}
Dec 11 10:11:27 crc kubenswrapper[4881]: I1211 10:11:27.194799 4881 generic.go:334] "Generic (PLEG): container finished" podID="15f24c58-7bae-48d2-80da-7e21cc8ad514" containerID="e08011cdbbfb2d875611109d70882818539095be8379662ae0b0ee7b1209824a" exitCode=0
Dec 11 10:11:27 crc kubenswrapper[4881]: I1211 10:11:27.194932 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rwlhh/crc-debug-cs55t" event={"ID":"15f24c58-7bae-48d2-80da-7e21cc8ad514","Type":"ContainerDied","Data":"e08011cdbbfb2d875611109d70882818539095be8379662ae0b0ee7b1209824a"}
Dec 11 10:11:28 crc kubenswrapper[4881]: I1211 10:11:28.321858 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rwlhh/crc-debug-cs55t"
Dec 11 10:11:28 crc kubenswrapper[4881]: I1211 10:11:28.364911 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/15f24c58-7bae-48d2-80da-7e21cc8ad514-host\") pod \"15f24c58-7bae-48d2-80da-7e21cc8ad514\" (UID: \"15f24c58-7bae-48d2-80da-7e21cc8ad514\") "
Dec 11 10:11:28 crc kubenswrapper[4881]: I1211 10:11:28.364992 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nv64x\" (UniqueName: \"kubernetes.io/projected/15f24c58-7bae-48d2-80da-7e21cc8ad514-kube-api-access-nv64x\") pod \"15f24c58-7bae-48d2-80da-7e21cc8ad514\" (UID: \"15f24c58-7bae-48d2-80da-7e21cc8ad514\") "
Dec 11 10:11:28 crc kubenswrapper[4881]: I1211 10:11:28.365014 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/15f24c58-7bae-48d2-80da-7e21cc8ad514-host" (OuterVolumeSpecName: "host") pod "15f24c58-7bae-48d2-80da-7e21cc8ad514" (UID: "15f24c58-7bae-48d2-80da-7e21cc8ad514"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 11 10:11:28 crc kubenswrapper[4881]: I1211 10:11:28.367017 4881 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/15f24c58-7bae-48d2-80da-7e21cc8ad514-host\") on node \"crc\" DevicePath \"\""
Dec 11 10:11:28 crc kubenswrapper[4881]: I1211 10:11:28.382364 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15f24c58-7bae-48d2-80da-7e21cc8ad514-kube-api-access-nv64x" (OuterVolumeSpecName: "kube-api-access-nv64x") pod "15f24c58-7bae-48d2-80da-7e21cc8ad514" (UID: "15f24c58-7bae-48d2-80da-7e21cc8ad514"). InnerVolumeSpecName "kube-api-access-nv64x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:11:28 crc kubenswrapper[4881]: I1211 10:11:28.468893 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nv64x\" (UniqueName: \"kubernetes.io/projected/15f24c58-7bae-48d2-80da-7e21cc8ad514-kube-api-access-nv64x\") on node \"crc\" DevicePath \"\""
Dec 11 10:11:29 crc kubenswrapper[4881]: I1211 10:11:29.217454 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rwlhh/crc-debug-cs55t" event={"ID":"15f24c58-7bae-48d2-80da-7e21cc8ad514","Type":"ContainerDied","Data":"0fa734473447919459f8267128b9d701bb60d2d3a2b921ee7665ce70bce67459"}
Dec 11 10:11:29 crc kubenswrapper[4881]: I1211 10:11:29.217499 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0fa734473447919459f8267128b9d701bb60d2d3a2b921ee7665ce70bce67459"
Dec 11 10:11:29 crc kubenswrapper[4881]: I1211 10:11:29.217530 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rwlhh/crc-debug-cs55t"
Dec 11 10:11:29 crc kubenswrapper[4881]: I1211 10:11:29.633785 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rwlhh/crc-debug-cs55t"]
Dec 11 10:11:29 crc kubenswrapper[4881]: I1211 10:11:29.651969 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rwlhh/crc-debug-cs55t"]
Dec 11 10:11:30 crc kubenswrapper[4881]: I1211 10:11:30.830020 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rwlhh/crc-debug-hn8hk"]
Dec 11 10:11:30 crc kubenswrapper[4881]: E1211 10:11:30.830807 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15f24c58-7bae-48d2-80da-7e21cc8ad514" containerName="container-00"
Dec 11 10:11:30 crc kubenswrapper[4881]: I1211 10:11:30.830821 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="15f24c58-7bae-48d2-80da-7e21cc8ad514" containerName="container-00"
Dec 11 10:11:30 crc kubenswrapper[4881]: I1211 10:11:30.831166 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="15f24c58-7bae-48d2-80da-7e21cc8ad514" containerName="container-00"
Dec 11 10:11:30 crc kubenswrapper[4881]: I1211 10:11:30.831954 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rwlhh/crc-debug-hn8hk" Dec 11 10:11:30 crc kubenswrapper[4881]: I1211 10:11:30.834315 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-rwlhh"/"default-dockercfg-dsftk" Dec 11 10:11:30 crc kubenswrapper[4881]: I1211 10:11:30.929503 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0-host\") pod \"crc-debug-hn8hk\" (UID: \"3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0\") " pod="openshift-must-gather-rwlhh/crc-debug-hn8hk" Dec 11 10:11:30 crc kubenswrapper[4881]: I1211 10:11:30.930294 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxdgr\" (UniqueName: \"kubernetes.io/projected/3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0-kube-api-access-rxdgr\") pod \"crc-debug-hn8hk\" (UID: \"3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0\") " pod="openshift-must-gather-rwlhh/crc-debug-hn8hk" Dec 11 10:11:31 crc kubenswrapper[4881]: I1211 10:11:31.017870 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15f24c58-7bae-48d2-80da-7e21cc8ad514" path="/var/lib/kubelet/pods/15f24c58-7bae-48d2-80da-7e21cc8ad514/volumes" Dec 11 10:11:31 crc kubenswrapper[4881]: I1211 10:11:31.034567 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxdgr\" (UniqueName: \"kubernetes.io/projected/3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0-kube-api-access-rxdgr\") pod \"crc-debug-hn8hk\" (UID: \"3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0\") " pod="openshift-must-gather-rwlhh/crc-debug-hn8hk" Dec 11 10:11:31 crc kubenswrapper[4881]: I1211 10:11:31.034710 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0-host\") pod \"crc-debug-hn8hk\" (UID: \"3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0\") " pod="openshift-must-gather-rwlhh/crc-debug-hn8hk" Dec 11 10:11:31 crc kubenswrapper[4881]: I1211 10:11:31.034937 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0-host\") pod \"crc-debug-hn8hk\" (UID: \"3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0\") " pod="openshift-must-gather-rwlhh/crc-debug-hn8hk" Dec 11 10:11:31 crc kubenswrapper[4881]: I1211 10:11:31.056082 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxdgr\" (UniqueName: \"kubernetes.io/projected/3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0-kube-api-access-rxdgr\") pod \"crc-debug-hn8hk\" (UID: \"3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0\") " pod="openshift-must-gather-rwlhh/crc-debug-hn8hk" Dec 11 10:11:31 crc kubenswrapper[4881]: I1211 10:11:31.163747 4881 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rwlhh/crc-debug-hn8hk" Dec 11 10:11:31 crc kubenswrapper[4881]: I1211 10:11:31.301711 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rwlhh/crc-debug-hn8hk" event={"ID":"3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0","Type":"ContainerStarted","Data":"dd443b57ffbc25e1a4d16d100e838ffcc0bab852f93c67c8bc092614e6156fdd"} Dec 11 10:11:32 crc kubenswrapper[4881]: I1211 10:11:32.313272 4881 generic.go:334] "Generic (PLEG): container finished" podID="3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0" containerID="3d6c8948fdfb8acf87938d05a8920f7952037b5b210507a4484581a185534961" exitCode=0 Dec 11 10:11:32 crc kubenswrapper[4881]: I1211 10:11:32.313348 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rwlhh/crc-debug-hn8hk" event={"ID":"3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0","Type":"ContainerDied","Data":"3d6c8948fdfb8acf87938d05a8920f7952037b5b210507a4484581a185534961"} Dec 11 10:11:32 crc kubenswrapper[4881]: I1211 10:11:32.360679 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rwlhh/crc-debug-hn8hk"] Dec 11 10:11:32 crc kubenswrapper[4881]: I1211 10:11:32.371597 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rwlhh/crc-debug-hn8hk"] Dec 11 10:11:33 crc kubenswrapper[4881]: I1211 10:11:33.477240 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rwlhh/crc-debug-hn8hk" Dec 11 10:11:33 crc kubenswrapper[4881]: I1211 10:11:33.495494 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0-host\") pod \"3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0\" (UID: \"3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0\") " Dec 11 10:11:33 crc kubenswrapper[4881]: I1211 10:11:33.495589 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxdgr\" (UniqueName: \"kubernetes.io/projected/3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0-kube-api-access-rxdgr\") pod \"3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0\" (UID: \"3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0\") " Dec 11 10:11:33 crc kubenswrapper[4881]: I1211 10:11:33.496997 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0-host" (OuterVolumeSpecName: "host") pod "3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0" (UID: "3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:11:33 crc kubenswrapper[4881]: I1211 10:11:33.502664 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0-kube-api-access-rxdgr" (OuterVolumeSpecName: "kube-api-access-rxdgr") pod "3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0" (UID: "3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0"). InnerVolumeSpecName "kube-api-access-rxdgr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:11:33 crc kubenswrapper[4881]: I1211 10:11:33.597857 4881 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0-host\") on node \"crc\" DevicePath \"\"" Dec 11 10:11:33 crc kubenswrapper[4881]: I1211 10:11:33.597895 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxdgr\" (UniqueName: \"kubernetes.io/projected/3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0-kube-api-access-rxdgr\") on node \"crc\" DevicePath \"\"" Dec 11 10:11:34 crc kubenswrapper[4881]: I1211 10:11:34.346365 4881 scope.go:117] "RemoveContainer" containerID="3d6c8948fdfb8acf87938d05a8920f7952037b5b210507a4484581a185534961" Dec 11 10:11:34 crc kubenswrapper[4881]: I1211 10:11:34.346586 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rwlhh/crc-debug-hn8hk" Dec 11 10:11:35 crc kubenswrapper[4881]: I1211 10:11:35.018893 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0" path="/var/lib/kubelet/pods/3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0/volumes" Dec 11 10:12:07 crc kubenswrapper[4881]: I1211 10:12:07.293619 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e25df4be-6d20-469b-999e-ae4ffe346be8/aodh-api/0.log" Dec 11 10:12:07 crc kubenswrapper[4881]: I1211 10:12:07.524122 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e25df4be-6d20-469b-999e-ae4ffe346be8/aodh-evaluator/0.log" Dec 11 10:12:07 crc kubenswrapper[4881]: I1211 10:12:07.615436 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e25df4be-6d20-469b-999e-ae4ffe346be8/aodh-listener/0.log" Dec 11 10:12:07 crc kubenswrapper[4881]: I1211 10:12:07.656897 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e25df4be-6d20-469b-999e-ae4ffe346be8/aodh-notifier/0.log" Dec 11 10:12:07 crc kubenswrapper[4881]: I1211 10:12:07.775169 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-67b8f6bb8b-gk4v6_24358cec-f24b-4eeb-ad37-069245596b56/barbican-api/0.log" Dec 11 10:12:07 crc kubenswrapper[4881]: I1211 10:12:07.908081 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-67b8f6bb8b-gk4v6_24358cec-f24b-4eeb-ad37-069245596b56/barbican-api-log/0.log" Dec 11 10:12:08 crc kubenswrapper[4881]: I1211 10:12:08.008993 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5766655fb4-qmcpd_b3f6375d-3379-4a1a-b875-286687315947/barbican-keystone-listener/0.log" Dec 11 10:12:08 crc kubenswrapper[4881]: I1211 10:12:08.149539 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5766655fb4-qmcpd_b3f6375d-3379-4a1a-b875-286687315947/barbican-keystone-listener-log/0.log" Dec 11 10:12:08 crc kubenswrapper[4881]: I1211 10:12:08.376887 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-9778dfbb5-mls2j_0fec4cf8-f794-4f69-9645-38b0dd1ef593/barbican-worker/0.log" Dec 11 10:12:08 crc kubenswrapper[4881]: I1211 10:12:08.410361 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-9778dfbb5-mls2j_0fec4cf8-f794-4f69-9645-38b0dd1ef593/barbican-worker-log/0.log" Dec 11 10:12:08 crc kubenswrapper[4881]: I1211 10:12:08.599318 4881 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-4n2kr_e741b94a-ed71-4819-ba06-943aa25aaaf8/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:08 crc kubenswrapper[4881]: I1211 10:12:08.815176 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_b51fa237-35ec-47d6-b61d-c3e50dc8450f/ceilometer-central-agent/0.log" Dec 11 10:12:08 crc kubenswrapper[4881]: I1211 10:12:08.848624 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_b51fa237-35ec-47d6-b61d-c3e50dc8450f/ceilometer-notification-agent/0.log" Dec 11 10:12:08 crc kubenswrapper[4881]: I1211 10:12:08.919038 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_b51fa237-35ec-47d6-b61d-c3e50dc8450f/proxy-httpd/0.log" Dec 11 10:12:08 crc kubenswrapper[4881]: I1211 10:12:08.965281 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_b51fa237-35ec-47d6-b61d-c3e50dc8450f/sg-core/0.log" Dec 11 10:12:09 crc kubenswrapper[4881]: I1211 10:12:09.200954 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_98c33c2d-b3e5-450d-8c52-544acac89c74/cinder-api-log/0.log" Dec 11 10:12:09 crc kubenswrapper[4881]: I1211 10:12:09.268715 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_98c33c2d-b3e5-450d-8c52-544acac89c74/cinder-api/0.log" Dec 11 10:12:09 crc kubenswrapper[4881]: I1211 10:12:09.523352 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e9aa88e0-71a6-40a0-92ec-88084b425df9/cinder-scheduler/0.log" Dec 11 10:12:09 crc kubenswrapper[4881]: I1211 10:12:09.637325 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e9aa88e0-71a6-40a0-92ec-88084b425df9/probe/0.log" Dec 11 10:12:09 crc kubenswrapper[4881]: I1211 10:12:09.643542 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-6mhvp_7dd4872c-380b-4dcc-bd46-ad6a624a2d34/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:09 crc kubenswrapper[4881]: I1211 10:12:09.883591 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-98j7p_97426780-cfa1-43ea-9cba-e4268c17b4c3/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:10 crc kubenswrapper[4881]: I1211 10:12:10.032493 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-xpw6s_ff97623d-cd72-4130-b08b-aa41fb1f3e55/init/0.log" Dec 11 10:12:10 crc kubenswrapper[4881]: I1211 10:12:10.335821 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-xpw6s_ff97623d-cd72-4130-b08b-aa41fb1f3e55/init/0.log" Dec 11 10:12:10 crc kubenswrapper[4881]: I1211 10:12:10.381503 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-nrjgn_532ceac4-3c2d-4d4a-900f-498fa41192b1/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:10 crc kubenswrapper[4881]: I1211 10:12:10.426925 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-xpw6s_ff97623d-cd72-4130-b08b-aa41fb1f3e55/dnsmasq-dns/0.log" Dec 11 10:12:10 crc kubenswrapper[4881]: I1211 10:12:10.646410 4881 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_glance-default-external-api-0_03bd1873-0976-4da1-a4f0-4bc1ab183cda/glance-log/0.log" Dec 11 10:12:10 crc kubenswrapper[4881]: I1211 10:12:10.705626 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_03bd1873-0976-4da1-a4f0-4bc1ab183cda/glance-httpd/0.log" Dec 11 10:12:11 crc kubenswrapper[4881]: I1211 10:12:11.009485 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_4cde142d-aa3a-4c3b-9e63-efcbce032089/glance-httpd/0.log" Dec 11 10:12:11 crc kubenswrapper[4881]: I1211 10:12:11.073180 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_4cde142d-aa3a-4c3b-9e63-efcbce032089/glance-log/0.log" Dec 11 10:12:11 crc kubenswrapper[4881]: I1211 10:12:11.638128 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-559447f984-4bfr5_6b4ff921-94ce-4083-ad5d-783a59c7fb4d/heat-engine/0.log" Dec 11 10:12:11 crc kubenswrapper[4881]: I1211 10:12:11.844922 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-pvld9_a4c5efad-5566-4a8d-85d8-c897f04fcb46/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:11 crc kubenswrapper[4881]: I1211 10:12:11.924575 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-77bc5fff7b-clttx_916eca8d-ca13-4db2-a350-b39a66bdee84/heat-api/0.log" Dec 11 10:12:12 crc kubenswrapper[4881]: I1211 10:12:12.042511 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-p978t_482d67c1-02c6-4526-99fb-2bc546471c4d/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:12 crc kubenswrapper[4881]: I1211 10:12:12.184713 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-5f8f5d98d5-xmfmx_059dc22b-b46b-482a-9a29-ded125bc4dac/heat-cfnapi/0.log" Dec 11 10:12:12 crc kubenswrapper[4881]: I1211 10:12:12.324056 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29424061-884r9_5cfd9f27-8ab3-4f72-8fb8-fa1b11a82b7a/keystone-cron/0.log" Dec 11 10:12:12 crc kubenswrapper[4881]: I1211 10:12:12.468114 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29424121-wd29n_b7f4b194-07cf-4a48-ab95-f0aece6d4576/keystone-cron/0.log" Dec 11 10:12:12 crc kubenswrapper[4881]: I1211 10:12:12.581979 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_dad35c24-846e-4c89-aa50-20ccea9fd132/kube-state-metrics/0.log" Dec 11 10:12:12 crc kubenswrapper[4881]: I1211 10:12:12.670895 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-7fbbb6db6c-bqwjn_cae51d9b-e997-4228-af25-872a6e16df8d/keystone-api/0.log" Dec 11 10:12:13 crc kubenswrapper[4881]: I1211 10:12:13.053628 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-xdlgc_1ad81113-10d1-4110-81ad-abd39146b84c/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:13 crc kubenswrapper[4881]: I1211 10:12:13.138925 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_logging-edpm-deployment-openstack-edpm-ipam-wqjnk_7afa083c-c63d-4f07-9a8f-15b00a918860/logging-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:13 crc kubenswrapper[4881]: I1211 10:12:13.375522 4881 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_mysqld-exporter-0_598ba082-c5c4-4dc3-b4ec-5db6677fdb61/mysqld-exporter/0.log" Dec 11 10:12:13 crc kubenswrapper[4881]: I1211 10:12:13.690566 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6986b4b8b9-dlx84_8edd456d-09d4-46fc-97ef-68c44cb5320c/neutron-api/0.log" Dec 11 10:12:13 crc kubenswrapper[4881]: I1211 10:12:13.810468 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6986b4b8b9-dlx84_8edd456d-09d4-46fc-97ef-68c44cb5320c/neutron-httpd/0.log" Dec 11 10:12:13 crc kubenswrapper[4881]: I1211 10:12:13.833702 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-tkvg4_0a91cc42-4d2c-4527-81b9-7bfe0432f4f4/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:14 crc kubenswrapper[4881]: I1211 10:12:14.635192 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_5bbd6724-7a9f-4aac-8ca7-199f8cba6223/nova-cell0-conductor-conductor/0.log" Dec 11 10:12:15 crc kubenswrapper[4881]: I1211 10:12:15.028146 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_7b6eaf58-bd08-44d7-bc98-0b11d1cea0b3/nova-cell1-conductor-conductor/0.log" Dec 11 10:12:15 crc kubenswrapper[4881]: I1211 10:12:15.088560 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_a461b235-8929-4c28-a4bc-fcc40fe9ede9/nova-api-log/0.log" Dec 11 10:12:15 crc kubenswrapper[4881]: I1211 10:12:15.448843 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-hq67p_84b496b0-b36c-4ece-ba2d-e73423d502cd/nova-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:15 crc kubenswrapper[4881]: I1211 10:12:15.457617 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_bf7c31d0-4b0e-4161-9ed5-3dc62c4388b9/nova-cell1-novncproxy-novncproxy/0.log" Dec 11 10:12:15 crc kubenswrapper[4881]: I1211 10:12:15.817788 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_af5edf02-ece3-42cd-b5da-a84f734d2505/nova-metadata-log/0.log" Dec 11 10:12:15 crc kubenswrapper[4881]: I1211 10:12:15.847399 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_a461b235-8929-4c28-a4bc-fcc40fe9ede9/nova-api-api/0.log" Dec 11 10:12:16 crc kubenswrapper[4881]: I1211 10:12:16.271036 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_70402eec-968d-4ceb-b259-5e2508ee21a0/mysql-bootstrap/0.log" Dec 11 10:12:16 crc kubenswrapper[4881]: I1211 10:12:16.408090 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_70402eec-968d-4ceb-b259-5e2508ee21a0/mysql-bootstrap/0.log" Dec 11 10:12:16 crc kubenswrapper[4881]: I1211 10:12:16.486419 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_b12d1174-33a2-4075-8cc6-bd591d290563/nova-scheduler-scheduler/0.log" Dec 11 10:12:16 crc kubenswrapper[4881]: I1211 10:12:16.553176 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_70402eec-968d-4ceb-b259-5e2508ee21a0/galera/0.log" Dec 11 10:12:16 crc kubenswrapper[4881]: I1211 10:12:16.781786 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_7a825abb-23ec-4f51-940d-2500da233e14/mysql-bootstrap/0.log" Dec 11 
10:12:17 crc kubenswrapper[4881]: I1211 10:12:17.358244 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_7a825abb-23ec-4f51-940d-2500da233e14/mysql-bootstrap/0.log" Dec 11 10:12:17 crc kubenswrapper[4881]: I1211 10:12:17.376022 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_7a825abb-23ec-4f51-940d-2500da233e14/galera/0.log" Dec 11 10:12:17 crc kubenswrapper[4881]: I1211 10:12:17.596210 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_5e8c400d-4f6c-4e16-8ed3-45d19b56a0bf/openstackclient/0.log" Dec 11 10:12:17 crc kubenswrapper[4881]: I1211 10:12:17.607567 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-rmmgk_cec096de-5459-4769-9e87-9a3f54d3e8dc/openstack-network-exporter/0.log" Dec 11 10:12:17 crc kubenswrapper[4881]: I1211 10:12:17.905022 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-5gqh8_ede1ec9d-4207-4a9c-ba57-3f2037f68632/ovsdb-server-init/0.log" Dec 11 10:12:18 crc kubenswrapper[4881]: I1211 10:12:18.051228 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-5gqh8_ede1ec9d-4207-4a9c-ba57-3f2037f68632/ovsdb-server-init/0.log" Dec 11 10:12:18 crc kubenswrapper[4881]: I1211 10:12:18.103743 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-5gqh8_ede1ec9d-4207-4a9c-ba57-3f2037f68632/ovs-vswitchd/0.log" Dec 11 10:12:18 crc kubenswrapper[4881]: I1211 10:12:18.187730 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-5gqh8_ede1ec9d-4207-4a9c-ba57-3f2037f68632/ovsdb-server/0.log" Dec 11 10:12:18 crc kubenswrapper[4881]: I1211 10:12:18.308511 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-xfltd_49d6015f-9f76-4e77-821e-2a11887e497c/ovn-controller/0.log" Dec 11 10:12:18 crc kubenswrapper[4881]: I1211 10:12:18.649934 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-tvdms_9b70ae00-542c-47d9-b985-5fc2433218a5/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:18 crc kubenswrapper[4881]: I1211 10:12:18.781197 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_ac1565ce-1297-4689-b199-d88c339feb68/openstack-network-exporter/0.log" Dec 11 10:12:18 crc kubenswrapper[4881]: I1211 10:12:18.863713 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_af5edf02-ece3-42cd-b5da-a84f734d2505/nova-metadata-metadata/0.log" Dec 11 10:12:18 crc kubenswrapper[4881]: I1211 10:12:18.906805 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_ac1565ce-1297-4689-b199-d88c339feb68/ovn-northd/0.log" Dec 11 10:12:19 crc kubenswrapper[4881]: I1211 10:12:19.055851 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_7f0aa090-3aac-4da8-9efa-a31a7b3b130f/openstack-network-exporter/0.log" Dec 11 10:12:19 crc kubenswrapper[4881]: I1211 10:12:19.176390 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_7f0aa090-3aac-4da8-9efa-a31a7b3b130f/ovsdbserver-nb/0.log" Dec 11 10:12:19 crc kubenswrapper[4881]: I1211 10:12:19.290248 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_b9b67c1c-0e11-4c19-8d1f-6c046375659c/openstack-network-exporter/0.log" Dec 11 
10:12:19 crc kubenswrapper[4881]: I1211 10:12:19.382664 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_b9b67c1c-0e11-4c19-8d1f-6c046375659c/ovsdbserver-sb/0.log" Dec 11 10:12:19 crc kubenswrapper[4881]: I1211 10:12:19.869573 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-55fbb6c694-gw7p4_30f691dc-faf6-411b-8cb8-db57047199b0/placement-log/0.log" Dec 11 10:12:19 crc kubenswrapper[4881]: I1211 10:12:19.884813 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_b515a685-da3e-4d92-a8e5-60561e9de83f/init-config-reloader/0.log" Dec 11 10:12:19 crc kubenswrapper[4881]: I1211 10:12:19.942241 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-55fbb6c694-gw7p4_30f691dc-faf6-411b-8cb8-db57047199b0/placement-api/0.log" Dec 11 10:12:20 crc kubenswrapper[4881]: I1211 10:12:20.196765 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_b515a685-da3e-4d92-a8e5-60561e9de83f/config-reloader/0.log" Dec 11 10:12:20 crc kubenswrapper[4881]: I1211 10:12:20.261234 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_b515a685-da3e-4d92-a8e5-60561e9de83f/init-config-reloader/0.log" Dec 11 10:12:20 crc kubenswrapper[4881]: I1211 10:12:20.275955 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_b515a685-da3e-4d92-a8e5-60561e9de83f/thanos-sidecar/0.log" Dec 11 10:12:20 crc kubenswrapper[4881]: I1211 10:12:20.278483 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_b515a685-da3e-4d92-a8e5-60561e9de83f/prometheus/0.log" Dec 11 10:12:20 crc kubenswrapper[4881]: I1211 10:12:20.524676 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_acc39512-3a6a-4e4c-a2a2-a13ad13b11f0/setup-container/0.log" Dec 11 10:12:21 crc kubenswrapper[4881]: I1211 10:12:21.016553 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_acc39512-3a6a-4e4c-a2a2-a13ad13b11f0/setup-container/0.log" Dec 11 10:12:21 crc kubenswrapper[4881]: I1211 10:12:21.026848 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d52ebbc7-03f0-4f73-827b-8f8066e83146/setup-container/0.log" Dec 11 10:12:21 crc kubenswrapper[4881]: I1211 10:12:21.069615 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_acc39512-3a6a-4e4c-a2a2-a13ad13b11f0/rabbitmq/0.log" Dec 11 10:12:21 crc kubenswrapper[4881]: I1211 10:12:21.336455 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d52ebbc7-03f0-4f73-827b-8f8066e83146/setup-container/0.log" Dec 11 10:12:21 crc kubenswrapper[4881]: I1211 10:12:21.425463 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d52ebbc7-03f0-4f73-827b-8f8066e83146/rabbitmq/0.log" Dec 11 10:12:21 crc kubenswrapper[4881]: I1211 10:12:21.504355 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-bsvm6_55357646-b980-4023-b886-5365ec6fd85f/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:21 crc kubenswrapper[4881]: I1211 10:12:21.803226 4881 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-t24nn_cde02f48-eb61-4053-b321-3ab152bafeaa/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:21 crc kubenswrapper[4881]: I1211 10:12:21.813459 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-bfzwp_5c0b2b12-1f8f-4bd7-a3ea-3a078f43f178/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:22 crc kubenswrapper[4881]: I1211 10:12:22.091860 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-xjxv2_0b72500e-98a3-4e2a-895b-422da6f81a8c/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:22 crc kubenswrapper[4881]: I1211 10:12:22.193161 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-f79qk_1e3366c8-0354-47fb-af1a-f579ed757f2b/ssh-known-hosts-edpm-deployment/0.log" Dec 11 10:12:22 crc kubenswrapper[4881]: I1211 10:12:22.398872 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5f6c547b6c-rjk9h_910014af-7b9e-49b8-99e3-b80a15d72faf/proxy-server/0.log" Dec 11 10:12:22 crc kubenswrapper[4881]: I1211 10:12:22.487615 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-qt8lj_d40e3cbd-c017-4b42-94ee-dea2565d55a3/swift-ring-rebalance/0.log" Dec 11 10:12:22 crc kubenswrapper[4881]: I1211 10:12:22.630126 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5f6c547b6c-rjk9h_910014af-7b9e-49b8-99e3-b80a15d72faf/proxy-httpd/0.log" Dec 11 10:12:22 crc kubenswrapper[4881]: I1211 10:12:22.675979 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/account-auditor/0.log" Dec 11 10:12:22 crc kubenswrapper[4881]: I1211 10:12:22.806211 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/account-reaper/0.log" Dec 11 10:12:22 crc kubenswrapper[4881]: I1211 10:12:22.918972 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/account-replicator/0.log" Dec 11 10:12:23 crc kubenswrapper[4881]: I1211 10:12:23.003374 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/account-server/0.log" Dec 11 10:12:23 crc kubenswrapper[4881]: I1211 10:12:23.054101 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/container-auditor/0.log" Dec 11 10:12:23 crc kubenswrapper[4881]: I1211 10:12:23.094714 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/container-replicator/0.log" Dec 11 10:12:23 crc kubenswrapper[4881]: I1211 10:12:23.191377 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/container-server/0.log" Dec 11 10:12:23 crc kubenswrapper[4881]: I1211 10:12:23.290378 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/container-updater/0.log" Dec 11 10:12:23 crc kubenswrapper[4881]: I1211 10:12:23.371994 4881 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/object-expirer/0.log" Dec 11 10:12:23 crc kubenswrapper[4881]: I1211 10:12:23.377624 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/object-auditor/0.log" Dec 11 10:12:23 crc kubenswrapper[4881]: I1211 10:12:23.512209 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/object-replicator/0.log" Dec 11 10:12:23 crc kubenswrapper[4881]: I1211 10:12:23.561569 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/object-server/0.log" Dec 11 10:12:23 crc kubenswrapper[4881]: I1211 10:12:23.675779 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/object-updater/0.log" Dec 11 10:12:23 crc kubenswrapper[4881]: I1211 10:12:23.736947 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/rsync/0.log" Dec 11 10:12:23 crc kubenswrapper[4881]: I1211 10:12:23.756024 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1af483c2-ea3f-45cd-971d-797c06f5c6e2/swift-recon-cron/0.log" Dec 11 10:12:24 crc kubenswrapper[4881]: I1211 10:12:24.047563 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-plpct_e87175a4-03cc-472f-90ac-18cb8573131f/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:24 crc kubenswrapper[4881]: I1211 10:12:24.093910 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-power-monitoring-edpm-deployment-openstack-edpm-h628s_1272adc3-399f-4c39-b62c-3bc18dda3b59/telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:24 crc kubenswrapper[4881]: I1211 10:12:24.609717 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_443704cb-4132-4086-9c08-edc325a2bbc5/test-operator-logs-container/0.log" Dec 11 10:12:24 crc kubenswrapper[4881]: I1211 10:12:24.883729 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-ktsnq_f4e51bed-808f-4037-b472-88fbe64bd15f/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 10:12:25 crc kubenswrapper[4881]: I1211 10:12:25.686386 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_44483fe0-748e-4e0e-9591-f5c14c4cd3f8/tempest-tests-tempest-tests-runner/0.log" Dec 11 10:12:34 crc kubenswrapper[4881]: I1211 10:12:34.446113 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_06499ad1-4a0e-46e2-b0fa-7583b8958148/memcached/0.log" Dec 11 10:12:55 crc kubenswrapper[4881]: I1211 10:12:55.645841 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5_63d42923-a2ed-4b92-82ab-e9ca4ad98e55/util/0.log" Dec 11 10:12:55 crc kubenswrapper[4881]: I1211 10:12:55.870929 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5_63d42923-a2ed-4b92-82ab-e9ca4ad98e55/util/0.log" Dec 11 10:12:55 crc kubenswrapper[4881]: I1211 10:12:55.898466 4881 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5_63d42923-a2ed-4b92-82ab-e9ca4ad98e55/pull/0.log" Dec 11 10:12:55 crc kubenswrapper[4881]: I1211 10:12:55.905077 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5_63d42923-a2ed-4b92-82ab-e9ca4ad98e55/pull/0.log" Dec 11 10:12:56 crc kubenswrapper[4881]: I1211 10:12:56.093965 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5_63d42923-a2ed-4b92-82ab-e9ca4ad98e55/util/0.log" Dec 11 10:12:56 crc kubenswrapper[4881]: I1211 10:12:56.186584 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5_63d42923-a2ed-4b92-82ab-e9ca4ad98e55/pull/0.log" Dec 11 10:12:56 crc kubenswrapper[4881]: I1211 10:12:56.198467 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b69ceae0a3beb1917c3cf44da9e05673c5eae51f07b46c3ef0a964a4f8l65q5_63d42923-a2ed-4b92-82ab-e9ca4ad98e55/extract/0.log" Dec 11 10:12:56 crc kubenswrapper[4881]: I1211 10:12:56.272561 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5bfbbb859d-d4dsr_e5f3ecaa-a91f-4dc8-9baf-2866cf8df0f4/kube-rbac-proxy/0.log" Dec 11 10:12:56 crc kubenswrapper[4881]: I1211 10:12:56.465687 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-lzm95_80435f25-efd8-482d-9a9b-1c6caafd655e/kube-rbac-proxy/0.log" Dec 11 10:12:56 crc kubenswrapper[4881]: I1211 10:12:56.470215 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5bfbbb859d-d4dsr_e5f3ecaa-a91f-4dc8-9baf-2866cf8df0f4/manager/0.log" Dec 11 10:12:56 crc kubenswrapper[4881]: I1211 10:12:56.548454 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-lzm95_80435f25-efd8-482d-9a9b-1c6caafd655e/manager/0.log" Dec 11 10:12:56 crc kubenswrapper[4881]: I1211 10:12:56.736735 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-6gtkq_e07ec193-8583-4299-9370-ce788e2e1ae1/kube-rbac-proxy/0.log" Dec 11 10:12:56 crc kubenswrapper[4881]: I1211 10:12:56.752521 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-6gtkq_e07ec193-8583-4299-9370-ce788e2e1ae1/manager/0.log" Dec 11 10:12:56 crc kubenswrapper[4881]: I1211 10:12:56.877016 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-85fbd69fcd-twbpm_d46284f5-997e-4ce9-a607-254c3ce33f31/kube-rbac-proxy/0.log" Dec 11 10:12:57 crc kubenswrapper[4881]: I1211 10:12:57.117544 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-85fbd69fcd-twbpm_d46284f5-997e-4ce9-a607-254c3ce33f31/manager/0.log" Dec 11 10:12:57 crc kubenswrapper[4881]: I1211 10:12:57.152938 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-rs5fd_d4850972-2a52-4030-9822-af3de9cc647a/manager/0.log" Dec 11 10:12:57 crc 
kubenswrapper[4881]: I1211 10:12:57.157309 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-rs5fd_d4850972-2a52-4030-9822-af3de9cc647a/kube-rbac-proxy/0.log" Dec 11 10:12:57 crc kubenswrapper[4881]: I1211 10:12:57.584316 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-dqhqx_2dab1d4f-2c9a-4b32-a666-4b0802e51576/manager/0.log" Dec 11 10:12:57 crc kubenswrapper[4881]: I1211 10:12:57.630862 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-dqhqx_2dab1d4f-2c9a-4b32-a666-4b0802e51576/kube-rbac-proxy/0.log" Dec 11 10:12:57 crc kubenswrapper[4881]: I1211 10:12:57.788736 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6c55d8d69b-44khn_867bc48e-c043-4428-b201-0ce4dd830f3f/kube-rbac-proxy/0.log" Dec 11 10:12:57 crc kubenswrapper[4881]: I1211 10:12:57.937255 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-mvlsx_2fd323b1-8fa8-456c-bcd8-d89872682762/kube-rbac-proxy/0.log" Dec 11 10:12:57 crc kubenswrapper[4881]: I1211 10:12:57.963215 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6c55d8d69b-44khn_867bc48e-c043-4428-b201-0ce4dd830f3f/manager/0.log" Dec 11 10:12:58 crc kubenswrapper[4881]: I1211 10:12:58.022678 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-mvlsx_2fd323b1-8fa8-456c-bcd8-d89872682762/manager/0.log" Dec 11 10:12:58 crc kubenswrapper[4881]: I1211 10:12:58.247652 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-79cc9d59f5-qh8s8_9b3ee431-6c33-4b49-8fdb-27056597fbe8/kube-rbac-proxy/0.log" Dec 11 10:12:58 crc kubenswrapper[4881]: I1211 10:12:58.274471 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-79cc9d59f5-qh8s8_9b3ee431-6c33-4b49-8fdb-27056597fbe8/manager/0.log" Dec 11 10:12:58 crc kubenswrapper[4881]: I1211 10:12:58.434524 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5cbc8c7f96-ww55g_da8fe0e3-3416-453d-80b7-47d4ab23c610/kube-rbac-proxy/0.log" Dec 11 10:12:58 crc kubenswrapper[4881]: I1211 10:12:58.439740 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5cbc8c7f96-ww55g_da8fe0e3-3416-453d-80b7-47d4ab23c610/manager/0.log" Dec 11 10:12:58 crc kubenswrapper[4881]: I1211 10:12:58.552268 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-qhz8n_21cbbd1f-7cfe-481a-b02a-f72c9d052519/kube-rbac-proxy/0.log" Dec 11 10:12:58 crc kubenswrapper[4881]: I1211 10:12:58.651314 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-qhz8n_21cbbd1f-7cfe-481a-b02a-f72c9d052519/manager/0.log" Dec 11 10:12:58 crc kubenswrapper[4881]: I1211 10:12:58.734245 4881 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-58879495c-kmpzx_5079b14d-bd2f-4151-898d-91362a4b24c2/kube-rbac-proxy/0.log" Dec 11 10:12:58 crc kubenswrapper[4881]: I1211 10:12:58.774554 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-58879495c-kmpzx_5079b14d-bd2f-4151-898d-91362a4b24c2/manager/0.log" Dec 11 10:12:58 crc kubenswrapper[4881]: I1211 10:12:58.959563 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-2dxhd_05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1/kube-rbac-proxy/0.log" Dec 11 10:12:59 crc kubenswrapper[4881]: I1211 10:12:59.140527 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-2dxhd_05ef8d73-6d8a-4d91-83a3-93ec0fc14ae1/manager/0.log" Dec 11 10:12:59 crc kubenswrapper[4881]: I1211 10:12:59.233564 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-d5fb87cb8-q22gz_ac8a74d8-c81e-4154-b2dc-7ebb23d13aa7/manager/0.log" Dec 11 10:12:59 crc kubenswrapper[4881]: I1211 10:12:59.246250 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-d5fb87cb8-q22gz_ac8a74d8-c81e-4154-b2dc-7ebb23d13aa7/kube-rbac-proxy/0.log" Dec 11 10:12:59 crc kubenswrapper[4881]: I1211 10:12:59.396793 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:12:59 crc kubenswrapper[4881]: I1211 10:12:59.396861 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:12:59 crc kubenswrapper[4881]: I1211 10:12:59.410754 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-kgwhp_2621fa0b-89fb-4d65-aef3-98de0e9a8106/kube-rbac-proxy/0.log" Dec 11 10:12:59 crc kubenswrapper[4881]: I1211 10:12:59.506647 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-kgwhp_2621fa0b-89fb-4d65-aef3-98de0e9a8106/manager/0.log" Dec 11 10:12:59 crc kubenswrapper[4881]: I1211 10:12:59.718176 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-b95f4d4f8-phlkr_128ea8d0-53b4-410c-8587-165aa960d46c/kube-rbac-proxy/0.log" Dec 11 10:12:59 crc kubenswrapper[4881]: I1211 10:12:59.871887 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-768c6dc6d6-plgdc_36714c9d-bfd8-4c2e-9d06-971da594217f/kube-rbac-proxy/0.log" Dec 11 10:13:00 crc kubenswrapper[4881]: I1211 10:13:00.178728 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-pgrfm_c8209c30-f66b-4b47-a663-a0dac2ea36dd/registry-server/0.log" Dec 11 10:13:00 crc kubenswrapper[4881]: I1211 10:13:00.275133 4881 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-768c6dc6d6-plgdc_36714c9d-bfd8-4c2e-9d06-971da594217f/operator/0.log"
Dec 11 10:13:00 crc kubenswrapper[4881]: I1211 10:13:00.503078 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-nnlz4_91ca18da-4852-496b-bf77-558e8010aabe/kube-rbac-proxy/0.log"
Dec 11 10:13:00 crc kubenswrapper[4881]: I1211 10:13:00.581116 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-nnlz4_91ca18da-4852-496b-bf77-558e8010aabe/manager/0.log"
Dec 11 10:13:00 crc kubenswrapper[4881]: I1211 10:13:00.743245 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-ftqqr_02aa2201-6757-40f8-b24d-fbad39b79069/kube-rbac-proxy/0.log"
Dec 11 10:13:00 crc kubenswrapper[4881]: I1211 10:13:00.794186 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-ftqqr_02aa2201-6757-40f8-b24d-fbad39b79069/manager/0.log"
Dec 11 10:13:00 crc kubenswrapper[4881]: I1211 10:13:00.879145 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-22zs6_14d65f13-7dce-49b7-9c8e-0a6ea9b57132/operator/0.log"
Dec 11 10:13:01 crc kubenswrapper[4881]: I1211 10:13:01.093464 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-8f6687c44-zkl8b_246f8ac7-b65e-40b1-aba1-ba1defde43ef/kube-rbac-proxy/0.log"
Dec 11 10:13:01 crc kubenswrapper[4881]: I1211 10:13:01.137813 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-b95f4d4f8-phlkr_128ea8d0-53b4-410c-8587-165aa960d46c/manager/0.log"
Dec 11 10:13:01 crc kubenswrapper[4881]: I1211 10:13:01.179733 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-8f6687c44-zkl8b_246f8ac7-b65e-40b1-aba1-ba1defde43ef/manager/0.log"
Dec 11 10:13:01 crc kubenswrapper[4881]: I1211 10:13:01.289044 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-fb56f4744-vgmrx_de3e8077-0bfa-4e55-aba0-0e5dca0e598d/kube-rbac-proxy/0.log"
Dec 11 10:13:01 crc kubenswrapper[4881]: I1211 10:13:01.361119 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-bb86466d8-6mz9j_06cd21f3-b69e-4238-9894-8c4f0f77ee53/kube-rbac-proxy/0.log"
Dec 11 10:13:01 crc kubenswrapper[4881]: I1211 10:13:01.478379 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-bb86466d8-6mz9j_06cd21f3-b69e-4238-9894-8c4f0f77ee53/manager/0.log"
Dec 11 10:13:01 crc kubenswrapper[4881]: I1211 10:13:01.644133 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-mj8lx_dd07d5b4-cfe3-4580-a859-64558daab601/kube-rbac-proxy/0.log"
Dec 11 10:13:01 crc kubenswrapper[4881]: I1211 10:13:01.656758 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-fb56f4744-vgmrx_de3e8077-0bfa-4e55-aba0-0e5dca0e598d/manager/0.log"
Dec 11 10:13:01 crc kubenswrapper[4881]: I1211 10:13:01.710821 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-mj8lx_dd07d5b4-cfe3-4580-a859-64558daab601/manager/0.log"
Dec 11 10:13:20 crc kubenswrapper[4881]: I1211 10:13:20.204403 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-dw769_a658d244-5927-4518-b8bb-0685d0e40a07/control-plane-machine-set-operator/0.log"
Dec 11 10:13:20 crc kubenswrapper[4881]: I1211 10:13:20.419249 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-vk42n_70bb46be-cfc1-411d-9a3b-55e040e1c2c5/kube-rbac-proxy/0.log"
Dec 11 10:13:20 crc kubenswrapper[4881]: I1211 10:13:20.464976 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-vk42n_70bb46be-cfc1-411d-9a3b-55e040e1c2c5/machine-api-operator/0.log"
Dec 11 10:13:29 crc kubenswrapper[4881]: I1211 10:13:29.396726 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 10:13:29 crc kubenswrapper[4881]: I1211 10:13:29.397507 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 10:13:33 crc kubenswrapper[4881]: I1211 10:13:33.146966 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-9wlsj_39798ebd-f640-4134-881a-1fc8aae8caf2/cert-manager-controller/0.log"
Dec 11 10:13:33 crc kubenswrapper[4881]: I1211 10:13:33.347266 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-k8mbh_b5ad6193-6ecc-4146-ba6a-b16704219c0b/cert-manager-webhook/0.log"
Dec 11 10:13:33 crc kubenswrapper[4881]: I1211 10:13:33.360124 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-m6cfc_e18d3098-b003-42c6-bba8-0fdeff9222d4/cert-manager-cainjector/0.log"
Dec 11 10:13:47 crc kubenswrapper[4881]: I1211 10:13:47.501494 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-6ff7998486-nj8ls_809482cd-c05d-41df-96db-84149e666743/nmstate-console-plugin/0.log"
Dec 11 10:13:47 crc kubenswrapper[4881]: I1211 10:13:47.749848 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-qx6gc_3ef011ad-0329-41b6-89d0-bbe3c976576b/nmstate-handler/0.log"
Dec 11 10:13:47 crc kubenswrapper[4881]: I1211 10:13:47.920345 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f7f7578db-m5zx5_74e6048b-0d2d-418c-907c-5858077de213/nmstate-metrics/0.log"
Dec 11 10:13:47 crc kubenswrapper[4881]: I1211 10:13:47.937846 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f7f7578db-m5zx5_74e6048b-0d2d-418c-907c-5858077de213/kube-rbac-proxy/0.log"
Dec 11 10:13:48 crc kubenswrapper[4881]: I1211 10:13:48.038386 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-6769fb99d-g9bf9_ae9ad369-6e2e-4c6c-a12a-cf228edaa48c/nmstate-operator/0.log"
Dec 11 10:13:48 crc kubenswrapper[4881]: I1211 10:13:48.162451 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-f8fb84555-mfvb7_dbbccd2c-ccc0-4501-b4b4-b85621051f5f/nmstate-webhook/0.log"
Dec 11 10:13:59 crc kubenswrapper[4881]: I1211 10:13:59.397166 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 10:13:59 crc kubenswrapper[4881]: I1211 10:13:59.397602 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 10:13:59 crc kubenswrapper[4881]: I1211 10:13:59.397646 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh"
Dec 11 10:13:59 crc kubenswrapper[4881]: I1211 10:13:59.399290 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7e4db498eb5765963dae9ad2606e02f73e963ed76b1fbfa5ecaf703742a6a415"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 11 10:13:59 crc kubenswrapper[4881]: I1211 10:13:59.399381 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://7e4db498eb5765963dae9ad2606e02f73e963ed76b1fbfa5ecaf703742a6a415" gracePeriod=600
Dec 11 10:13:59 crc kubenswrapper[4881]: I1211 10:13:59.543434 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="7e4db498eb5765963dae9ad2606e02f73e963ed76b1fbfa5ecaf703742a6a415" exitCode=0
Dec 11 10:13:59 crc kubenswrapper[4881]: I1211 10:13:59.543563 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"7e4db498eb5765963dae9ad2606e02f73e963ed76b1fbfa5ecaf703742a6a415"}
Dec 11 10:13:59 crc kubenswrapper[4881]: I1211 10:13:59.543872 4881 scope.go:117] "RemoveContainer" containerID="e1b977d6cf38148e3fb0a2375af7359048b9dcb9e237c41bd315b667b8afd334"
Dec 11 10:14:00 crc kubenswrapper[4881]: I1211 10:14:00.559927 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705"}
Dec 11 10:14:02 crc kubenswrapper[4881]: I1211 10:14:02.575991 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5895ddbf9f-qxpgx_71e1a3d0-ed67-45c6-8bfd-95237910c5c9/kube-rbac-proxy/0.log"
Dec 11 10:14:02 crc kubenswrapper[4881]: I1211 10:14:02.701022 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5895ddbf9f-qxpgx_71e1a3d0-ed67-45c6-8bfd-95237910c5c9/manager/0.log"
Dec 11 10:14:18 crc kubenswrapper[4881]: I1211 10:14:18.141950 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_cluster-logging-operator-ff9846bd-w8lh7_e9a4de57-461a-4db3-b12e-5d9eb9fd0a60/cluster-logging-operator/0.log"
Dec 11 10:14:18 crc kubenswrapper[4881]: I1211 10:14:18.328922 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_collector-bwkfh_c095e07a-478b-41db-8ca3-a6b29c79756c/collector/0.log"
Dec 11 10:14:18 crc kubenswrapper[4881]: I1211 10:14:18.366589 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-compactor-0_19c28f0c-c6b1-4192-b769-35ce88232323/loki-compactor/0.log"
Dec 11 10:14:18 crc kubenswrapper[4881]: I1211 10:14:18.576728 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-distributor-76cc67bf56-7wtq5_3d172162-6309-4035-b574-842fa40d6db6/loki-distributor/0.log"
Dec 11 10:14:18 crc kubenswrapper[4881]: I1211 10:14:18.590009 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-5f65744c89-srnq9_3470b561-e428-417b-bd76-92642ba561d8/gateway/0.log"
Dec 11 10:14:18 crc kubenswrapper[4881]: I1211 10:14:18.694401 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-5f65744c89-srnq9_3470b561-e428-417b-bd76-92642ba561d8/opa/0.log"
Dec 11 10:14:18 crc kubenswrapper[4881]: I1211 10:14:18.765877 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-5f65744c89-tvqmf_166ecd73-e9b9-4aa0-b09c-7ad373aea239/opa/0.log"
Dec 11 10:14:18 crc kubenswrapper[4881]: I1211 10:14:18.804719 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-5f65744c89-tvqmf_166ecd73-e9b9-4aa0-b09c-7ad373aea239/gateway/0.log"
Dec 11 10:14:18 crc kubenswrapper[4881]: I1211 10:14:18.937790 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-index-gateway-0_7d91da1c-1fcb-4d73-b7ad-838bd4b3eb6b/loki-index-gateway/0.log"
Dec 11 10:14:19 crc kubenswrapper[4881]: I1211 10:14:19.041844 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-ingester-0_76043698-27ae-4a6d-af81-a7da0a14902d/loki-ingester/0.log"
Dec 11 10:14:19 crc kubenswrapper[4881]: I1211 10:14:19.232582 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-querier-5895d59bb8-87hfb_b4ff84df-3a3b-4346-84a2-56f79c1aac44/loki-querier/0.log"
Dec 11 10:14:19 crc kubenswrapper[4881]: I1211 10:14:19.317013 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-query-frontend-84558f7c9f-sswft_b4e3cba0-44c9-46ee-97f1-fb3b34b37cb3/loki-query-frontend/0.log"
Dec 11 10:14:35 crc kubenswrapper[4881]: I1211 10:14:35.444325 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5bddd4b946-mzlq2_1f002df3-6d4a-4f05-8ef8-07bc16590076/kube-rbac-proxy/0.log"
Dec 11 10:14:35 crc kubenswrapper[4881]: I1211 10:14:35.605023 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5bddd4b946-mzlq2_1f002df3-6d4a-4f05-8ef8-07bc16590076/controller/0.log"
Dec 11 10:14:35 crc kubenswrapper[4881]: I1211 10:14:35.781521 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7784b6fcf-nkdkx_52d03d01-bd10-4a71-993f-284fa256ebae/frr-k8s-webhook-server/0.log"
Dec 11 10:14:35 crc kubenswrapper[4881]: I1211 10:14:35.876802 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-frr-files/0.log"
Dec 11 10:14:36 crc kubenswrapper[4881]: I1211 10:14:36.053226 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-reloader/0.log"
Dec 11 10:14:36 crc kubenswrapper[4881]: I1211 10:14:36.066153 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-frr-files/0.log"
Dec 11 10:14:36 crc kubenswrapper[4881]: I1211 10:14:36.084892 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-metrics/0.log"
Dec 11 10:14:36 crc kubenswrapper[4881]: I1211 10:14:36.365626 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-reloader/0.log"
Dec 11 10:14:36 crc kubenswrapper[4881]: I1211 10:14:36.498203 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-frr-files/0.log"
Dec 11 10:14:36 crc kubenswrapper[4881]: I1211 10:14:36.552419 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-metrics/0.log"
Dec 11 10:14:36 crc kubenswrapper[4881]: I1211 10:14:36.572160 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-reloader/0.log"
Dec 11 10:14:36 crc kubenswrapper[4881]: I1211 10:14:36.588407 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-metrics/0.log"
Dec 11 10:14:36 crc kubenswrapper[4881]: I1211 10:14:36.780172 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-frr-files/0.log"
Dec 11 10:14:36 crc kubenswrapper[4881]: I1211 10:14:36.817680 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-reloader/0.log"
Dec 11 10:14:36 crc kubenswrapper[4881]: I1211 10:14:36.854314 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/cp-metrics/0.log"
Dec 11 10:14:36 crc kubenswrapper[4881]: I1211 10:14:36.855676 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/controller/0.log"
Dec 11 10:14:37 crc kubenswrapper[4881]: I1211 10:14:37.096234 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/frr-metrics/0.log"
Dec 11 10:14:37 crc kubenswrapper[4881]: I1211 10:14:37.129641 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/kube-rbac-proxy/0.log"
Dec 11 10:14:37 crc kubenswrapper[4881]: I1211 10:14:37.140099 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/kube-rbac-proxy-frr/0.log"
Dec 11 10:14:37 crc kubenswrapper[4881]: I1211 10:14:37.345908 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/reloader/0.log"
Dec 11 10:14:37 crc kubenswrapper[4881]: I1211 10:14:37.447356 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5646b5c6f5-clxl4_fda9a059-2ee6-41ae-ad81-e4f694080990/manager/0.log"
Dec 11 10:14:37 crc kubenswrapper[4881]: I1211 10:14:37.666296 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-65f54b9948-tf47z_56dc9d55-54e9-44bc-9a4b-3a1eb32f4f04/webhook-server/0.log"
Dec 11 10:14:38 crc kubenswrapper[4881]: I1211 10:14:38.040267 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-x559l_3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3/kube-rbac-proxy/0.log"
Dec 11 10:14:38 crc kubenswrapper[4881]: I1211 10:14:38.638019 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-x559l_3a0e36ce-d2bd-4725-bc4a-e37ea057b5c3/speaker/0.log"
Dec 11 10:14:39 crc kubenswrapper[4881]: I1211 10:14:39.118628 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-xj6xk_30393d1e-8b58-4cb7-9e45-23b5b79f235e/frr/0.log"
Dec 11 10:14:52 crc kubenswrapper[4881]: I1211 10:14:52.481024 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr_e703c073-e40b-4293-9138-648e3d24c648/util/0.log"
Dec 11 10:14:52 crc kubenswrapper[4881]: I1211 10:14:52.609233 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr_e703c073-e40b-4293-9138-648e3d24c648/util/0.log"
Dec 11 10:14:52 crc kubenswrapper[4881]: I1211 10:14:52.683170 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr_e703c073-e40b-4293-9138-648e3d24c648/pull/0.log"
Dec 11 10:14:52 crc kubenswrapper[4881]: I1211 10:14:52.689842 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr_e703c073-e40b-4293-9138-648e3d24c648/pull/0.log"
Dec 11 10:14:52 crc kubenswrapper[4881]: I1211 10:14:52.841798 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr_e703c073-e40b-4293-9138-648e3d24c648/pull/0.log"
Dec 11 10:14:52 crc kubenswrapper[4881]: I1211 10:14:52.862998 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr_e703c073-e40b-4293-9138-648e3d24c648/util/0.log"
Dec 11 10:14:52 crc kubenswrapper[4881]: I1211 10:14:52.895109 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8sbtpr_e703c073-e40b-4293-9138-648e3d24c648/extract/0.log"
Dec 11 10:14:53 crc kubenswrapper[4881]: I1211 10:14:53.036914 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7_8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e/util/0.log"
Dec 11 10:14:53 crc kubenswrapper[4881]: I1211 10:14:53.273991 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7_8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e/util/0.log"
Dec 11 10:14:53 crc kubenswrapper[4881]: I1211 10:14:53.275457 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7_8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e/pull/0.log"
Dec 11 10:14:53 crc kubenswrapper[4881]: I1211 10:14:53.304004 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7_8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e/pull/0.log"
Dec 11 10:14:53 crc kubenswrapper[4881]: I1211 10:14:53.479097 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7_8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e/util/0.log"
Dec 11 10:14:53 crc kubenswrapper[4881]: I1211 10:14:53.514362 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7_8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e/pull/0.log"
Dec 11 10:14:53 crc kubenswrapper[4881]: I1211 10:14:53.517246 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d472jx7_8a6e4a4a-8f04-4f2a-87bd-07f36e56a51e/extract/0.log"
Dec 11 10:14:53 crc kubenswrapper[4881]: I1211 10:14:53.697572 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk_61c61666-4cf4-40df-bc54-202ef93cf87d/util/0.log"
Dec 11 10:14:53 crc kubenswrapper[4881]: I1211 10:14:53.940638 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk_61c61666-4cf4-40df-bc54-202ef93cf87d/pull/0.log"
Dec 11 10:14:53 crc kubenswrapper[4881]: I1211 10:14:53.942712 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk_61c61666-4cf4-40df-bc54-202ef93cf87d/util/0.log"
Dec 11 10:14:53 crc kubenswrapper[4881]: I1211 10:14:53.986548 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk_61c61666-4cf4-40df-bc54-202ef93cf87d/pull/0.log"
Dec 11 10:14:54 crc kubenswrapper[4881]: I1211 10:14:54.118152 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk_61c61666-4cf4-40df-bc54-202ef93cf87d/util/0.log"
Dec 11 10:14:54 crc kubenswrapper[4881]: I1211 10:14:54.156291 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk_61c61666-4cf4-40df-bc54-202ef93cf87d/pull/0.log"
Dec 11 10:14:54 crc kubenswrapper[4881]: I1211 10:14:54.233616 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zj5zk_61c61666-4cf4-40df-bc54-202ef93cf87d/extract/0.log"
Dec 11 10:14:54 crc kubenswrapper[4881]: I1211 10:14:54.357753 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf_6d84ce93-6f0f-4248-a431-dd1692ff2ba8/util/0.log"
Dec 11 10:14:54 crc kubenswrapper[4881]: I1211 10:14:54.571829 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf_6d84ce93-6f0f-4248-a431-dd1692ff2ba8/util/0.log"
Dec 11 10:14:54 crc kubenswrapper[4881]: I1211 10:14:54.639825 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf_6d84ce93-6f0f-4248-a431-dd1692ff2ba8/pull/0.log"
Dec 11 10:14:54 crc kubenswrapper[4881]: I1211 10:14:54.655472 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf_6d84ce93-6f0f-4248-a431-dd1692ff2ba8/pull/0.log"
Dec 11 10:14:54 crc kubenswrapper[4881]: I1211 10:14:54.795321 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf_6d84ce93-6f0f-4248-a431-dd1692ff2ba8/util/0.log"
Dec 11 10:14:54 crc kubenswrapper[4881]: I1211 10:14:54.885079 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf_6d84ce93-6f0f-4248-a431-dd1692ff2ba8/pull/0.log"
Dec 11 10:14:54 crc kubenswrapper[4881]: I1211 10:14:54.911966 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8q9msf_6d84ce93-6f0f-4248-a431-dd1692ff2ba8/extract/0.log"
Dec 11 10:14:55 crc kubenswrapper[4881]: I1211 10:14:55.000470 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng_b1985fae-8a44-4865-b0bc-7d9e8d197a02/util/0.log"
Dec 11 10:14:55 crc kubenswrapper[4881]: I1211 10:14:55.178676 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng_b1985fae-8a44-4865-b0bc-7d9e8d197a02/util/0.log"
Dec 11 10:14:55 crc kubenswrapper[4881]: I1211 10:14:55.197868 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng_b1985fae-8a44-4865-b0bc-7d9e8d197a02/pull/0.log"
Dec 11 10:14:55 crc kubenswrapper[4881]: I1211 10:14:55.210872 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng_b1985fae-8a44-4865-b0bc-7d9e8d197a02/pull/0.log"
Dec 11 10:14:55 crc kubenswrapper[4881]: I1211 10:14:55.383053 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng_b1985fae-8a44-4865-b0bc-7d9e8d197a02/pull/0.log"
Dec 11 10:14:55 crc kubenswrapper[4881]: I1211 10:14:55.431345 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng_b1985fae-8a44-4865-b0bc-7d9e8d197a02/util/0.log"
Dec 11 10:14:55 crc kubenswrapper[4881]: I1211 10:14:55.446907 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f2wqng_b1985fae-8a44-4865-b0bc-7d9e8d197a02/extract/0.log"
Dec 11 10:14:55 crc kubenswrapper[4881]: I1211 10:14:55.564211 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qnffx_18611892-d199-4d6c-a3b6-391c8c78511c/extract-utilities/0.log"
Dec 11 10:14:55 crc kubenswrapper[4881]: I1211 10:14:55.750976 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qnffx_18611892-d199-4d6c-a3b6-391c8c78511c/extract-utilities/0.log"
Dec 11 10:14:55 crc kubenswrapper[4881]: I1211 10:14:55.785519 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qnffx_18611892-d199-4d6c-a3b6-391c8c78511c/extract-content/0.log"
Dec 11 10:14:55 crc kubenswrapper[4881]: I1211 10:14:55.803737 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qnffx_18611892-d199-4d6c-a3b6-391c8c78511c/extract-content/0.log"
Dec 11 10:14:56 crc kubenswrapper[4881]: I1211 10:14:56.019438 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qnffx_18611892-d199-4d6c-a3b6-391c8c78511c/extract-content/0.log"
Dec 11 10:14:56 crc kubenswrapper[4881]: I1211 10:14:56.056395 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qnffx_18611892-d199-4d6c-a3b6-391c8c78511c/extract-utilities/0.log"
Dec 11 10:14:56 crc kubenswrapper[4881]: I1211 10:14:56.237033 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-xn9vx_368b103b-ef6b-4607-ae3b-8efd42f4ef3f/extract-utilities/0.log"
Dec 11 10:14:56 crc kubenswrapper[4881]: I1211 10:14:56.475488 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qnffx_18611892-d199-4d6c-a3b6-391c8c78511c/registry-server/0.log"
Dec 11 10:14:56 crc kubenswrapper[4881]: I1211 10:14:56.478415 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-xn9vx_368b103b-ef6b-4607-ae3b-8efd42f4ef3f/extract-content/0.log"
Dec 11 10:14:56 crc kubenswrapper[4881]: I1211 10:14:56.521865 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-xn9vx_368b103b-ef6b-4607-ae3b-8efd42f4ef3f/extract-content/0.log"
Dec 11 10:14:56 crc kubenswrapper[4881]: I1211 10:14:56.534569 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-xn9vx_368b103b-ef6b-4607-ae3b-8efd42f4ef3f/extract-utilities/0.log"
Dec 11 10:14:56 crc kubenswrapper[4881]: I1211 10:14:56.685212 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-xn9vx_368b103b-ef6b-4607-ae3b-8efd42f4ef3f/extract-utilities/0.log"
Dec 11 10:14:56 crc kubenswrapper[4881]: I1211 10:14:56.689838 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-xn9vx_368b103b-ef6b-4607-ae3b-8efd42f4ef3f/extract-content/0.log"
Dec 11 10:14:56 crc kubenswrapper[4881]: I1211 10:14:56.851172 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-xn9vx_368b103b-ef6b-4607-ae3b-8efd42f4ef3f/registry-server/0.log"
Dec 11 10:14:56 crc kubenswrapper[4881]: I1211 10:14:56.858768 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-tbhw6_a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf/marketplace-operator/1.log"
Dec 11 10:14:56 crc kubenswrapper[4881]: I1211 10:14:56.992499 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-tbhw6_a98dd8bc-f2be-42b3-94c9-5eb725bfb1bf/marketplace-operator/0.log"
Dec 11 10:14:57 crc kubenswrapper[4881]: I1211 10:14:57.361548 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j9mc4_e1b26f03-480c-45ab-b37e-c2971f8e117a/extract-utilities/0.log"
Dec 11 10:14:57 crc kubenswrapper[4881]: I1211 10:14:57.489852 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j9mc4_e1b26f03-480c-45ab-b37e-c2971f8e117a/extract-content/0.log"
Dec 11 10:14:57 crc kubenswrapper[4881]: I1211 10:14:57.498236 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j9mc4_e1b26f03-480c-45ab-b37e-c2971f8e117a/extract-content/0.log"
Dec 11 10:14:57 crc kubenswrapper[4881]: I1211 10:14:57.506761 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j9mc4_e1b26f03-480c-45ab-b37e-c2971f8e117a/extract-utilities/0.log"
Dec 11 10:14:57 crc kubenswrapper[4881]: I1211 10:14:57.721738 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j9mc4_e1b26f03-480c-45ab-b37e-c2971f8e117a/extract-content/0.log"
Dec 11 10:14:57 crc kubenswrapper[4881]: I1211 10:14:57.739295 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j9mc4_e1b26f03-480c-45ab-b37e-c2971f8e117a/extract-utilities/0.log"
Dec 11 10:14:57 crc kubenswrapper[4881]: I1211 10:14:57.783012 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kfk9x_23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f/extract-utilities/0.log"
Dec 11 10:14:57 crc kubenswrapper[4881]: I1211 10:14:57.997393 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kfk9x_23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f/extract-content/0.log"
Dec 11 10:14:58 crc kubenswrapper[4881]: I1211 10:14:58.066115 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kfk9x_23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f/extract-utilities/0.log"
Dec 11 10:14:58 crc kubenswrapper[4881]: I1211 10:14:58.071429 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j9mc4_e1b26f03-480c-45ab-b37e-c2971f8e117a/registry-server/0.log"
Dec 11 10:14:58 crc kubenswrapper[4881]: I1211 10:14:58.114708 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kfk9x_23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f/extract-content/0.log"
Dec 11 10:14:58 crc kubenswrapper[4881]: I1211 10:14:58.297464 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kfk9x_23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f/extract-utilities/0.log"
Dec 11 10:14:58 crc kubenswrapper[4881]: I1211 10:14:58.331380 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kfk9x_23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f/extract-content/0.log"
Dec 11 10:14:59 crc kubenswrapper[4881]: I1211 10:14:59.211615 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kfk9x_23d4f6b6-7fa3-4c1b-afc8-ce176ef3b69f/registry-server/0.log"
Dec 11 10:15:00 crc kubenswrapper[4881]: I1211 10:15:00.290052 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz"]
Dec 11 10:15:00 crc kubenswrapper[4881]: E1211 10:15:00.291570 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0" containerName="container-00"
Dec 11 10:15:00 crc kubenswrapper[4881]: I1211 10:15:00.291641 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0" containerName="container-00"
Dec 11 10:15:00 crc kubenswrapper[4881]: I1211 10:15:00.295849 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f5aa9e6-8a50-4d77-a1bc-f6b0012c73c0" containerName="container-00"
Dec 11 10:15:00 crc kubenswrapper[4881]: I1211 10:15:00.307630 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz"
Dec 11 10:15:00 crc kubenswrapper[4881]: I1211 10:15:00.312541 4881 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Dec 11 10:15:00 crc kubenswrapper[4881]: I1211 10:15:00.327700 4881 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Dec 11 10:15:00 crc kubenswrapper[4881]: I1211 10:15:00.361286 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz"]
Dec 11 10:15:00 crc kubenswrapper[4881]: I1211 10:15:00.471074 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/136e626f-7cf6-4563-939c-2923f4e08296-secret-volume\") pod \"collect-profiles-29424135-kzlhz\" (UID: \"136e626f-7cf6-4563-939c-2923f4e08296\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz"
Dec 11 10:15:00 crc kubenswrapper[4881]: I1211 10:15:00.471204 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/136e626f-7cf6-4563-939c-2923f4e08296-config-volume\") pod \"collect-profiles-29424135-kzlhz\" (UID: \"136e626f-7cf6-4563-939c-2923f4e08296\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz"
Dec 11 10:15:00 crc kubenswrapper[4881]: I1211 10:15:00.471735 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bm8l\" (UniqueName: \"kubernetes.io/projected/136e626f-7cf6-4563-939c-2923f4e08296-kube-api-access-8bm8l\") pod \"collect-profiles-29424135-kzlhz\" (UID: \"136e626f-7cf6-4563-939c-2923f4e08296\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz"
Dec 11 10:15:00 crc kubenswrapper[4881]: I1211 10:15:00.574212 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bm8l\" (UniqueName: \"kubernetes.io/projected/136e626f-7cf6-4563-939c-2923f4e08296-kube-api-access-8bm8l\") pod \"collect-profiles-29424135-kzlhz\" (UID: \"136e626f-7cf6-4563-939c-2923f4e08296\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz"
Dec 11 10:15:00 crc kubenswrapper[4881]: I1211 10:15:00.574374 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/136e626f-7cf6-4563-939c-2923f4e08296-secret-volume\") pod \"collect-profiles-29424135-kzlhz\" (UID: \"136e626f-7cf6-4563-939c-2923f4e08296\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz"
Dec 11 10:15:00 crc kubenswrapper[4881]: I1211 10:15:00.574441 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/136e626f-7cf6-4563-939c-2923f4e08296-config-volume\") pod \"collect-profiles-29424135-kzlhz\" (UID: \"136e626f-7cf6-4563-939c-2923f4e08296\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz"
Dec 11 10:15:00 crc kubenswrapper[4881]: I1211 10:15:00.575514 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/136e626f-7cf6-4563-939c-2923f4e08296-config-volume\") pod \"collect-profiles-29424135-kzlhz\" (UID: \"136e626f-7cf6-4563-939c-2923f4e08296\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz"
Dec 11 10:15:00 crc kubenswrapper[4881]: I1211 10:15:00.588324 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/136e626f-7cf6-4563-939c-2923f4e08296-secret-volume\") pod \"collect-profiles-29424135-kzlhz\" (UID: \"136e626f-7cf6-4563-939c-2923f4e08296\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz"
Dec 11 10:15:00 crc kubenswrapper[4881]: I1211 10:15:00.591212 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bm8l\" (UniqueName: \"kubernetes.io/projected/136e626f-7cf6-4563-939c-2923f4e08296-kube-api-access-8bm8l\") pod \"collect-profiles-29424135-kzlhz\" (UID: \"136e626f-7cf6-4563-939c-2923f4e08296\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz"
Dec 11 10:15:00 crc kubenswrapper[4881]: I1211 10:15:00.661267 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz"
Dec 11 10:15:01 crc kubenswrapper[4881]: I1211 10:15:01.451707 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz"]
Dec 11 10:15:02 crc kubenswrapper[4881]: I1211 10:15:02.235419 4881 generic.go:334] "Generic (PLEG): container finished" podID="136e626f-7cf6-4563-939c-2923f4e08296" containerID="a03269cf82ca8d3e5e227daac935f4d25aa7d822491efa2ca00d27a703c0dff2" exitCode=0
Dec 11 10:15:02 crc kubenswrapper[4881]: I1211 10:15:02.235727 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz" event={"ID":"136e626f-7cf6-4563-939c-2923f4e08296","Type":"ContainerDied","Data":"a03269cf82ca8d3e5e227daac935f4d25aa7d822491efa2ca00d27a703c0dff2"}
Dec 11 10:15:02 crc kubenswrapper[4881]: I1211 10:15:02.235760 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz" event={"ID":"136e626f-7cf6-4563-939c-2923f4e08296","Type":"ContainerStarted","Data":"a1ce94746bdc9ee4bca756c2e5c03e7ab92d6b94234115bf304b7a5df855fa87"}
Dec 11 10:15:03 crc kubenswrapper[4881]: I1211 10:15:03.665735 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz"
Dec 11 10:15:03 crc kubenswrapper[4881]: I1211 10:15:03.753715 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/136e626f-7cf6-4563-939c-2923f4e08296-secret-volume\") pod \"136e626f-7cf6-4563-939c-2923f4e08296\" (UID: \"136e626f-7cf6-4563-939c-2923f4e08296\") "
Dec 11 10:15:03 crc kubenswrapper[4881]: I1211 10:15:03.753814 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/136e626f-7cf6-4563-939c-2923f4e08296-config-volume\") pod \"136e626f-7cf6-4563-939c-2923f4e08296\" (UID: \"136e626f-7cf6-4563-939c-2923f4e08296\") "
Dec 11 10:15:03 crc kubenswrapper[4881]: I1211 10:15:03.753904 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bm8l\" (UniqueName: \"kubernetes.io/projected/136e626f-7cf6-4563-939c-2923f4e08296-kube-api-access-8bm8l\") pod \"136e626f-7cf6-4563-939c-2923f4e08296\" (UID: \"136e626f-7cf6-4563-939c-2923f4e08296\") "
Dec 11 10:15:03 crc kubenswrapper[4881]: I1211 10:15:03.754664 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/136e626f-7cf6-4563-939c-2923f4e08296-config-volume" (OuterVolumeSpecName: "config-volume") pod "136e626f-7cf6-4563-939c-2923f4e08296" (UID: "136e626f-7cf6-4563-939c-2923f4e08296"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:15:03 crc kubenswrapper[4881]: I1211 10:15:03.761852 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/136e626f-7cf6-4563-939c-2923f4e08296-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "136e626f-7cf6-4563-939c-2923f4e08296" (UID: "136e626f-7cf6-4563-939c-2923f4e08296"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:15:03 crc kubenswrapper[4881]: I1211 10:15:03.762783 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/136e626f-7cf6-4563-939c-2923f4e08296-kube-api-access-8bm8l" (OuterVolumeSpecName: "kube-api-access-8bm8l") pod "136e626f-7cf6-4563-939c-2923f4e08296" (UID: "136e626f-7cf6-4563-939c-2923f4e08296"). InnerVolumeSpecName "kube-api-access-8bm8l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:15:03 crc kubenswrapper[4881]: I1211 10:15:03.860130 4881 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/136e626f-7cf6-4563-939c-2923f4e08296-secret-volume\") on node \"crc\" DevicePath \"\""
Dec 11 10:15:03 crc kubenswrapper[4881]: I1211 10:15:03.860181 4881 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/136e626f-7cf6-4563-939c-2923f4e08296-config-volume\") on node \"crc\" DevicePath \"\""
Dec 11 10:15:03 crc kubenswrapper[4881]: I1211 10:15:03.860194 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bm8l\" (UniqueName: \"kubernetes.io/projected/136e626f-7cf6-4563-939c-2923f4e08296-kube-api-access-8bm8l\") on node \"crc\" DevicePath \"\""
Dec 11 10:15:04 crc kubenswrapper[4881]: I1211 10:15:04.261184 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz" event={"ID":"136e626f-7cf6-4563-939c-2923f4e08296","Type":"ContainerDied","Data":"a1ce94746bdc9ee4bca756c2e5c03e7ab92d6b94234115bf304b7a5df855fa87"}
Dec 11 10:15:04 crc kubenswrapper[4881]: I1211 10:15:04.261555 4881 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a1ce94746bdc9ee4bca756c2e5c03e7ab92d6b94234115bf304b7a5df855fa87"
Dec 11 10:15:04 crc kubenswrapper[4881]: I1211 10:15:04.261261 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424135-kzlhz"
Dec 11 10:15:04 crc kubenswrapper[4881]: I1211 10:15:04.742933 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw"]
Dec 11 10:15:04 crc kubenswrapper[4881]: I1211 10:15:04.755280 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424090-df5hw"]
Dec 11 10:15:05 crc kubenswrapper[4881]: I1211 10:15:05.020989 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2503315f-4203-483b-811e-d2a520564f97" path="/var/lib/kubelet/pods/2503315f-4203-483b-811e-d2a520564f97/volumes"
Dec 11 10:15:12 crc kubenswrapper[4881]: I1211 10:15:12.751875 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-qwshf_518448cb-2c51-4398-a75a-3d2c0d26905e/prometheus-operator/0.log"
Dec 11 10:15:12 crc kubenswrapper[4881]: I1211 10:15:12.920966 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-77767bb68c-n4z89_91763e1b-8187-4c07-be69-34a7330afb73/prometheus-operator-admission-webhook/0.log"
Dec 11 10:15:13 crc kubenswrapper[4881]: I1211 10:15:13.004805 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-77767bb68c-7w7p7_6e8d1991-85d6-4f79-8233-98a8c9be9b32/prometheus-operator-admission-webhook/0.log"
Dec 11 10:15:13 crc kubenswrapper[4881]: I1211 10:15:13.096822 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-zdvsr_578f637c-c2d8-46be-9838-f2a0b587b0c6/operator/0.log"
Dec 11 10:15:13 crc kubenswrapper[4881]: I1211 10:15:13.225467 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-7d5fb4cbfb-qvsbb_3c5fa886-8b43-4ef2-9f4b-d4724c4efa56/observability-ui-dashboards/0.log"
Dec 11 10:15:13 crc kubenswrapper[4881]: I1211 10:15:13.325403 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-clg8t_8df2f7b3-931a-4e09-b473-f71d8ee210d8/perses-operator/0.log"
Dec 11 10:15:14 crc kubenswrapper[4881]: I1211 10:15:14.572325 4881 scope.go:117] "RemoveContainer" containerID="f2757a69b2a1cc2b2a50ce0f130770c5bcbc99ac1e083bcf64b3ecd958c73ef9"
Dec 11 10:15:27 crc kubenswrapper[4881]: I1211 10:15:27.023846 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5895ddbf9f-qxpgx_71e1a3d0-ed67-45c6-8bfd-95237910c5c9/manager/0.log"
Dec 11 10:15:27 crc kubenswrapper[4881]: I1211 10:15:27.068546 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5895ddbf9f-qxpgx_71e1a3d0-ed67-45c6-8bfd-95237910c5c9/kube-rbac-proxy/0.log"
Dec 11 10:15:51 crc kubenswrapper[4881]: E1211 10:15:51.023610 4881 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.20:52622->38.102.83.20:39683: write tcp 38.102.83.20:52622->38.102.83.20:39683: write: broken pipe
Dec 11 10:15:59 crc kubenswrapper[4881]: I1211 10:15:59.396774 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 10:15:59 crc kubenswrapper[4881]: I1211 10:15:59.397276 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 10:16:29 crc kubenswrapper[4881]: I1211 10:16:29.397661 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 10:16:29 crc kubenswrapper[4881]: I1211 10:16:29.398374 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 10:16:42 crc kubenswrapper[4881]: I1211 10:16:42.756137 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-d67bt"]
Dec 11 10:16:42 crc kubenswrapper[4881]: E1211 10:16:42.757436 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="136e626f-7cf6-4563-939c-2923f4e08296" containerName="collect-profiles"
Dec 11 10:16:42 crc kubenswrapper[4881]: I1211 10:16:42.757454 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="136e626f-7cf6-4563-939c-2923f4e08296" containerName="collect-profiles"
Dec 11 10:16:42 crc kubenswrapper[4881]: I1211 10:16:42.757754 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="136e626f-7cf6-4563-939c-2923f4e08296" containerName="collect-profiles"
Dec 11 10:16:42 crc kubenswrapper[4881]: I1211 10:16:42.761399 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d67bt"
Dec 11 10:16:42 crc kubenswrapper[4881]: I1211 10:16:42.774989 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d67bt"]
Dec 11 10:16:42 crc kubenswrapper[4881]: I1211 10:16:42.890241 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35b8c61b-1d92-4390-86f7-b577536dde61-utilities\") pod \"redhat-marketplace-d67bt\" (UID: \"35b8c61b-1d92-4390-86f7-b577536dde61\") " pod="openshift-marketplace/redhat-marketplace-d67bt"
Dec 11 10:16:42 crc kubenswrapper[4881]: I1211 10:16:42.890367 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rn9s\" (UniqueName: \"kubernetes.io/projected/35b8c61b-1d92-4390-86f7-b577536dde61-kube-api-access-6rn9s\") pod \"redhat-marketplace-d67bt\" (UID: \"35b8c61b-1d92-4390-86f7-b577536dde61\") " pod="openshift-marketplace/redhat-marketplace-d67bt"
Dec 11 10:16:42 crc kubenswrapper[4881]: I1211 10:16:42.890446 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35b8c61b-1d92-4390-86f7-b577536dde61-catalog-content\") pod \"redhat-marketplace-d67bt\" (UID: \"35b8c61b-1d92-4390-86f7-b577536dde61\") " pod="openshift-marketplace/redhat-marketplace-d67bt"
Dec 11 10:16:42 crc kubenswrapper[4881]: I1211 10:16:42.993815 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35b8c61b-1d92-4390-86f7-b577536dde61-utilities\") pod \"redhat-marketplace-d67bt\" (UID: \"35b8c61b-1d92-4390-86f7-b577536dde61\") " pod="openshift-marketplace/redhat-marketplace-d67bt"
Dec 11 10:16:42 crc kubenswrapper[4881]: I1211 10:16:42.994217 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rn9s\" (UniqueName: \"kubernetes.io/projected/35b8c61b-1d92-4390-86f7-b577536dde61-kube-api-access-6rn9s\") pod \"redhat-marketplace-d67bt\" (UID: \"35b8c61b-1d92-4390-86f7-b577536dde61\") " pod="openshift-marketplace/redhat-marketplace-d67bt"
Dec 11 10:16:42 crc kubenswrapper[4881]: I1211 10:16:42.994619 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35b8c61b-1d92-4390-86f7-b577536dde61-utilities\") pod \"redhat-marketplace-d67bt\" (UID: \"35b8c61b-1d92-4390-86f7-b577536dde61\") " pod="openshift-marketplace/redhat-marketplace-d67bt"
Dec 11 10:16:42 crc kubenswrapper[4881]: I1211 10:16:42.994659 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35b8c61b-1d92-4390-86f7-b577536dde61-catalog-content\") pod \"redhat-marketplace-d67bt\" (UID: \"35b8c61b-1d92-4390-86f7-b577536dde61\") " pod="openshift-marketplace/redhat-marketplace-d67bt"
Dec 11 10:16:42 crc kubenswrapper[4881]: I1211 10:16:42.994694 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35b8c61b-1d92-4390-86f7-b577536dde61-catalog-content\") pod \"redhat-marketplace-d67bt\" (UID: \"35b8c61b-1d92-4390-86f7-b577536dde61\") " pod="openshift-marketplace/redhat-marketplace-d67bt"
Dec 11 10:16:43 crc kubenswrapper[4881]: I1211 10:16:43.013726 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rn9s\" (UniqueName: \"kubernetes.io/projected/35b8c61b-1d92-4390-86f7-b577536dde61-kube-api-access-6rn9s\") pod \"redhat-marketplace-d67bt\" (UID: \"35b8c61b-1d92-4390-86f7-b577536dde61\") " pod="openshift-marketplace/redhat-marketplace-d67bt"
Dec 11 10:16:43 crc kubenswrapper[4881]: I1211 10:16:43.105080 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d67bt"
Dec 11 10:16:43 crc kubenswrapper[4881]: I1211 10:16:43.666161 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d67bt"]
Dec 11 10:16:44 crc kubenswrapper[4881]: I1211 10:16:44.309321 4881 generic.go:334] "Generic (PLEG): container finished" podID="35b8c61b-1d92-4390-86f7-b577536dde61" containerID="8c748cc314fde79b7cdfa72c37037acc933d84c6176f936c5f256338d176df29" exitCode=0
Dec 11 10:16:44 crc kubenswrapper[4881]: I1211 10:16:44.309565 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d67bt" event={"ID":"35b8c61b-1d92-4390-86f7-b577536dde61","Type":"ContainerDied","Data":"8c748cc314fde79b7cdfa72c37037acc933d84c6176f936c5f256338d176df29"}
Dec 11 10:16:44 crc kubenswrapper[4881]: I1211 10:16:44.309589 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d67bt" event={"ID":"35b8c61b-1d92-4390-86f7-b577536dde61","Type":"ContainerStarted","Data":"156caa1c5389d08c9bd343f1aafe94a3f6152b040e21bce93c387b6c3f099d8d"}
Dec 11 10:16:44 crc kubenswrapper[4881]: I1211 10:16:44.315224 4881 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 11 10:16:46 crc kubenswrapper[4881]: I1211 10:16:46.336622 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d67bt" event={"ID":"35b8c61b-1d92-4390-86f7-b577536dde61","Type":"ContainerStarted","Data":"22a2db464f2fa618507fd91ea22b07e17bb49f78637871ba7a0f410a16265775"}
Dec 11 10:16:47 crc kubenswrapper[4881]: I1211 10:16:47.350147 4881 generic.go:334] "Generic (PLEG): container finished" podID="35b8c61b-1d92-4390-86f7-b577536dde61" containerID="22a2db464f2fa618507fd91ea22b07e17bb49f78637871ba7a0f410a16265775" exitCode=0
Dec 11 10:16:47 crc kubenswrapper[4881]: I1211 10:16:47.350517 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d67bt" event={"ID":"35b8c61b-1d92-4390-86f7-b577536dde61","Type":"ContainerDied","Data":"22a2db464f2fa618507fd91ea22b07e17bb49f78637871ba7a0f410a16265775"}
Dec 11 10:16:48 crc kubenswrapper[4881]: I1211 10:16:48.363841 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d67bt" event={"ID":"35b8c61b-1d92-4390-86f7-b577536dde61","Type":"ContainerStarted","Data":"c78f489cdf89287163323210f527534a0eac87816d2c84c9b2bfb1885c1b9962"}
Dec 11 10:16:48 crc kubenswrapper[4881]: I1211 10:16:48.394800 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-d67bt" podStartSLOduration=2.637399623 podStartE2EDuration="6.394435824s" podCreationTimestamp="2025-12-11 10:16:42 +0000 UTC" firstStartedPulling="2025-12-11 10:16:44.313100587 +0000 UTC m=+7252.690469284" lastFinishedPulling="2025-12-11 10:16:48.070136788 +0000 UTC m=+7256.447505485" observedRunningTime="2025-12-11 10:16:48.387142453 +0000 UTC m=+7256.764511150" watchObservedRunningTime="2025-12-11 10:16:48.394435824 +0000 UTC m=+7256.771804521"
Dec 11 10:16:53 crc kubenswrapper[4881]: I1211 10:16:53.110058 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-d67bt"
Dec 11 10:16:53 crc kubenswrapper[4881]: I1211 10:16:53.110659 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-d67bt"
Dec 11 10:16:53 crc kubenswrapper[4881]: I1211 10:16:53.169577 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-d67bt"
Dec 11 10:16:53 crc kubenswrapper[4881]: I1211 10:16:53.471233 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-d67bt"
Dec 11 10:16:53 crc kubenswrapper[4881]: I1211 10:16:53.533922 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d67bt"]
Dec 11 10:16:55 crc kubenswrapper[4881]: I1211 10:16:55.450429 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-d67bt" podUID="35b8c61b-1d92-4390-86f7-b577536dde61" containerName="registry-server" containerID="cri-o://c78f489cdf89287163323210f527534a0eac87816d2c84c9b2bfb1885c1b9962" gracePeriod=2
Dec 11 10:16:55 crc kubenswrapper[4881]: I1211 10:16:55.985044 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d67bt"
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.053060 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rn9s\" (UniqueName: \"kubernetes.io/projected/35b8c61b-1d92-4390-86f7-b577536dde61-kube-api-access-6rn9s\") pod \"35b8c61b-1d92-4390-86f7-b577536dde61\" (UID: \"35b8c61b-1d92-4390-86f7-b577536dde61\") "
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.053168 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35b8c61b-1d92-4390-86f7-b577536dde61-catalog-content\") pod \"35b8c61b-1d92-4390-86f7-b577536dde61\" (UID: \"35b8c61b-1d92-4390-86f7-b577536dde61\") "
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.053471 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35b8c61b-1d92-4390-86f7-b577536dde61-utilities\") pod \"35b8c61b-1d92-4390-86f7-b577536dde61\" (UID: \"35b8c61b-1d92-4390-86f7-b577536dde61\") "
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.054294 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35b8c61b-1d92-4390-86f7-b577536dde61-utilities" (OuterVolumeSpecName: "utilities") pod "35b8c61b-1d92-4390-86f7-b577536dde61" (UID: "35b8c61b-1d92-4390-86f7-b577536dde61"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.058692 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35b8c61b-1d92-4390-86f7-b577536dde61-kube-api-access-6rn9s" (OuterVolumeSpecName: "kube-api-access-6rn9s") pod "35b8c61b-1d92-4390-86f7-b577536dde61" (UID: "35b8c61b-1d92-4390-86f7-b577536dde61"). InnerVolumeSpecName "kube-api-access-6rn9s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.074706 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35b8c61b-1d92-4390-86f7-b577536dde61-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "35b8c61b-1d92-4390-86f7-b577536dde61" (UID: "35b8c61b-1d92-4390-86f7-b577536dde61"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.156710 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35b8c61b-1d92-4390-86f7-b577536dde61-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.156740 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35b8c61b-1d92-4390-86f7-b577536dde61-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.156749 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rn9s\" (UniqueName: \"kubernetes.io/projected/35b8c61b-1d92-4390-86f7-b577536dde61-kube-api-access-6rn9s\") on node \"crc\" DevicePath \"\""
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.469293 4881 generic.go:334] "Generic (PLEG): container finished" podID="35b8c61b-1d92-4390-86f7-b577536dde61" containerID="c78f489cdf89287163323210f527534a0eac87816d2c84c9b2bfb1885c1b9962" exitCode=0
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.469368 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d67bt"
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.469370 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d67bt" event={"ID":"35b8c61b-1d92-4390-86f7-b577536dde61","Type":"ContainerDied","Data":"c78f489cdf89287163323210f527534a0eac87816d2c84c9b2bfb1885c1b9962"}
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.469904 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d67bt" event={"ID":"35b8c61b-1d92-4390-86f7-b577536dde61","Type":"ContainerDied","Data":"156caa1c5389d08c9bd343f1aafe94a3f6152b040e21bce93c387b6c3f099d8d"}
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.469925 4881 scope.go:117] "RemoveContainer" containerID="c78f489cdf89287163323210f527534a0eac87816d2c84c9b2bfb1885c1b9962"
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.496151 4881 scope.go:117] "RemoveContainer" containerID="22a2db464f2fa618507fd91ea22b07e17bb49f78637871ba7a0f410a16265775"
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.529492 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d67bt"]
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.546816 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-d67bt"]
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.550419 4881 scope.go:117] "RemoveContainer" containerID="8c748cc314fde79b7cdfa72c37037acc933d84c6176f936c5f256338d176df29"
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.611128 4881 scope.go:117] "RemoveContainer" containerID="c78f489cdf89287163323210f527534a0eac87816d2c84c9b2bfb1885c1b9962"
Dec 11 10:16:56 crc kubenswrapper[4881]: E1211 10:16:56.612664 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c78f489cdf89287163323210f527534a0eac87816d2c84c9b2bfb1885c1b9962\": container with ID starting with c78f489cdf89287163323210f527534a0eac87816d2c84c9b2bfb1885c1b9962 not found: ID does not exist" containerID="c78f489cdf89287163323210f527534a0eac87816d2c84c9b2bfb1885c1b9962"
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.612778 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c78f489cdf89287163323210f527534a0eac87816d2c84c9b2bfb1885c1b9962"} err="failed to get container status \"c78f489cdf89287163323210f527534a0eac87816d2c84c9b2bfb1885c1b9962\": rpc error: code = NotFound desc = could not find container \"c78f489cdf89287163323210f527534a0eac87816d2c84c9b2bfb1885c1b9962\": container with ID starting with c78f489cdf89287163323210f527534a0eac87816d2c84c9b2bfb1885c1b9962 not found: ID does not exist"
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.612875 4881 scope.go:117] "RemoveContainer" containerID="22a2db464f2fa618507fd91ea22b07e17bb49f78637871ba7a0f410a16265775"
Dec 11 10:16:56 crc kubenswrapper[4881]: E1211 10:16:56.613472 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22a2db464f2fa618507fd91ea22b07e17bb49f78637871ba7a0f410a16265775\": container with ID starting with 22a2db464f2fa618507fd91ea22b07e17bb49f78637871ba7a0f410a16265775 not found: ID does not exist" containerID="22a2db464f2fa618507fd91ea22b07e17bb49f78637871ba7a0f410a16265775"
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.613507 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22a2db464f2fa618507fd91ea22b07e17bb49f78637871ba7a0f410a16265775"} err="failed to get container status \"22a2db464f2fa618507fd91ea22b07e17bb49f78637871ba7a0f410a16265775\": rpc error: code = NotFound desc = could not find container \"22a2db464f2fa618507fd91ea22b07e17bb49f78637871ba7a0f410a16265775\": container with ID starting with 22a2db464f2fa618507fd91ea22b07e17bb49f78637871ba7a0f410a16265775 not found: ID does not exist"
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.613531 4881 scope.go:117] "RemoveContainer" containerID="8c748cc314fde79b7cdfa72c37037acc933d84c6176f936c5f256338d176df29"
Dec 11 10:16:56 crc kubenswrapper[4881]: E1211 10:16:56.613874 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c748cc314fde79b7cdfa72c37037acc933d84c6176f936c5f256338d176df29\": container with ID starting with 8c748cc314fde79b7cdfa72c37037acc933d84c6176f936c5f256338d176df29 not found: ID does not exist" containerID="8c748cc314fde79b7cdfa72c37037acc933d84c6176f936c5f256338d176df29"
Dec 11 10:16:56 crc kubenswrapper[4881]: I1211 10:16:56.613985 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c748cc314fde79b7cdfa72c37037acc933d84c6176f936c5f256338d176df29"} err="failed to get container status \"8c748cc314fde79b7cdfa72c37037acc933d84c6176f936c5f256338d176df29\": rpc error: code = NotFound desc = could not find container \"8c748cc314fde79b7cdfa72c37037acc933d84c6176f936c5f256338d176df29\": container with ID starting with 8c748cc314fde79b7cdfa72c37037acc933d84c6176f936c5f256338d176df29 not found: ID does not exist"
Dec 11 10:16:57 crc kubenswrapper[4881]: I1211 10:16:57.018504 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35b8c61b-1d92-4390-86f7-b577536dde61" path="/var/lib/kubelet/pods/35b8c61b-1d92-4390-86f7-b577536dde61/volumes"
Dec 11 10:16:59 crc kubenswrapper[4881]: I1211 10:16:59.396794 4881 patch_prober.go:28] interesting pod/machine-config-daemon-z9nnh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 10:16:59 crc kubenswrapper[4881]: I1211 10:16:59.397445 4881 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 10:16:59 crc kubenswrapper[4881]: I1211 10:16:59.397486 4881 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh"
Dec 11 10:16:59 crc kubenswrapper[4881]: I1211 10:16:59.398376 4881 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705"} pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 11 10:16:59 crc kubenswrapper[4881]: I1211 10:16:59.398430 4881 kuberuntime_container.go:808] "Killing container with a grace period"
pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerName="machine-config-daemon" containerID="cri-o://d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" gracePeriod=600 Dec 11 10:16:59 crc kubenswrapper[4881]: E1211 10:16:59.525857 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:17:00 crc kubenswrapper[4881]: I1211 10:17:00.517059 4881 generic.go:334] "Generic (PLEG): container finished" podID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" exitCode=0 Dec 11 10:17:00 crc kubenswrapper[4881]: I1211 10:17:00.517157 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerDied","Data":"d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705"} Dec 11 10:17:00 crc kubenswrapper[4881]: I1211 10:17:00.517464 4881 scope.go:117] "RemoveContainer" containerID="7e4db498eb5765963dae9ad2606e02f73e963ed76b1fbfa5ecaf703742a6a415" Dec 11 10:17:00 crc kubenswrapper[4881]: I1211 10:17:00.518315 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:17:00 crc kubenswrapper[4881]: E1211 10:17:00.518723 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:17:13 crc kubenswrapper[4881]: I1211 10:17:13.016787 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:17:13 crc kubenswrapper[4881]: E1211 10:17:13.018149 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:17:21 crc kubenswrapper[4881]: I1211 10:17:21.549467 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zhczh"] Dec 11 10:17:21 crc kubenswrapper[4881]: E1211 10:17:21.550684 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35b8c61b-1d92-4390-86f7-b577536dde61" containerName="extract-content" Dec 11 10:17:21 crc kubenswrapper[4881]: I1211 10:17:21.550713 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="35b8c61b-1d92-4390-86f7-b577536dde61" containerName="extract-content" Dec 11 10:17:21 crc kubenswrapper[4881]: E1211 10:17:21.550735 4881 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="35b8c61b-1d92-4390-86f7-b577536dde61" containerName="registry-server" Dec 11 10:17:21 crc kubenswrapper[4881]: I1211 10:17:21.550742 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="35b8c61b-1d92-4390-86f7-b577536dde61" containerName="registry-server" Dec 11 10:17:21 crc kubenswrapper[4881]: E1211 10:17:21.550778 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35b8c61b-1d92-4390-86f7-b577536dde61" containerName="extract-utilities" Dec 11 10:17:21 crc kubenswrapper[4881]: I1211 10:17:21.550787 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="35b8c61b-1d92-4390-86f7-b577536dde61" containerName="extract-utilities" Dec 11 10:17:21 crc kubenswrapper[4881]: I1211 10:17:21.551073 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="35b8c61b-1d92-4390-86f7-b577536dde61" containerName="registry-server" Dec 11 10:17:21 crc kubenswrapper[4881]: I1211 10:17:21.552904 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zhczh" Dec 11 10:17:21 crc kubenswrapper[4881]: I1211 10:17:21.570659 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zhczh"] Dec 11 10:17:21 crc kubenswrapper[4881]: I1211 10:17:21.624301 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20d03676-bb0d-476e-9ca0-ca11c3f4fe24-utilities\") pod \"certified-operators-zhczh\" (UID: \"20d03676-bb0d-476e-9ca0-ca11c3f4fe24\") " pod="openshift-marketplace/certified-operators-zhczh" Dec 11 10:17:21 crc kubenswrapper[4881]: I1211 10:17:21.624641 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbsms\" (UniqueName: \"kubernetes.io/projected/20d03676-bb0d-476e-9ca0-ca11c3f4fe24-kube-api-access-kbsms\") pod \"certified-operators-zhczh\" (UID: \"20d03676-bb0d-476e-9ca0-ca11c3f4fe24\") " pod="openshift-marketplace/certified-operators-zhczh" Dec 11 10:17:21 crc kubenswrapper[4881]: I1211 10:17:21.624765 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20d03676-bb0d-476e-9ca0-ca11c3f4fe24-catalog-content\") pod \"certified-operators-zhczh\" (UID: \"20d03676-bb0d-476e-9ca0-ca11c3f4fe24\") " pod="openshift-marketplace/certified-operators-zhczh" Dec 11 10:17:21 crc kubenswrapper[4881]: I1211 10:17:21.726899 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbsms\" (UniqueName: \"kubernetes.io/projected/20d03676-bb0d-476e-9ca0-ca11c3f4fe24-kube-api-access-kbsms\") pod \"certified-operators-zhczh\" (UID: \"20d03676-bb0d-476e-9ca0-ca11c3f4fe24\") " pod="openshift-marketplace/certified-operators-zhczh" Dec 11 10:17:21 crc kubenswrapper[4881]: I1211 10:17:21.726998 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20d03676-bb0d-476e-9ca0-ca11c3f4fe24-catalog-content\") pod \"certified-operators-zhczh\" (UID: \"20d03676-bb0d-476e-9ca0-ca11c3f4fe24\") " pod="openshift-marketplace/certified-operators-zhczh" Dec 11 10:17:21 crc kubenswrapper[4881]: I1211 10:17:21.727066 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20d03676-bb0d-476e-9ca0-ca11c3f4fe24-utilities\") 
pod \"certified-operators-zhczh\" (UID: \"20d03676-bb0d-476e-9ca0-ca11c3f4fe24\") " pod="openshift-marketplace/certified-operators-zhczh" Dec 11 10:17:21 crc kubenswrapper[4881]: I1211 10:17:21.727599 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20d03676-bb0d-476e-9ca0-ca11c3f4fe24-utilities\") pod \"certified-operators-zhczh\" (UID: \"20d03676-bb0d-476e-9ca0-ca11c3f4fe24\") " pod="openshift-marketplace/certified-operators-zhczh" Dec 11 10:17:21 crc kubenswrapper[4881]: I1211 10:17:21.727692 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20d03676-bb0d-476e-9ca0-ca11c3f4fe24-catalog-content\") pod \"certified-operators-zhczh\" (UID: \"20d03676-bb0d-476e-9ca0-ca11c3f4fe24\") " pod="openshift-marketplace/certified-operators-zhczh" Dec 11 10:17:21 crc kubenswrapper[4881]: I1211 10:17:21.753533 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbsms\" (UniqueName: \"kubernetes.io/projected/20d03676-bb0d-476e-9ca0-ca11c3f4fe24-kube-api-access-kbsms\") pod \"certified-operators-zhczh\" (UID: \"20d03676-bb0d-476e-9ca0-ca11c3f4fe24\") " pod="openshift-marketplace/certified-operators-zhczh" Dec 11 10:17:21 crc kubenswrapper[4881]: I1211 10:17:21.884047 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zhczh" Dec 11 10:17:22 crc kubenswrapper[4881]: I1211 10:17:22.425722 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zhczh"] Dec 11 10:17:22 crc kubenswrapper[4881]: I1211 10:17:22.796017 4881 generic.go:334] "Generic (PLEG): container finished" podID="20d03676-bb0d-476e-9ca0-ca11c3f4fe24" containerID="624e8db923009f322d3ed19e905b4b484a03f0368e4efa798950d95ca6d25a95" exitCode=0 Dec 11 10:17:22 crc kubenswrapper[4881]: I1211 10:17:22.796191 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zhczh" event={"ID":"20d03676-bb0d-476e-9ca0-ca11c3f4fe24","Type":"ContainerDied","Data":"624e8db923009f322d3ed19e905b4b484a03f0368e4efa798950d95ca6d25a95"} Dec 11 10:17:22 crc kubenswrapper[4881]: I1211 10:17:22.796352 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zhczh" event={"ID":"20d03676-bb0d-476e-9ca0-ca11c3f4fe24","Type":"ContainerStarted","Data":"657ad49a9ea4fe77861e7404a217b2725f982922e73083e95297d18652fa78c0"} Dec 11 10:17:23 crc kubenswrapper[4881]: I1211 10:17:23.807998 4881 generic.go:334] "Generic (PLEG): container finished" podID="44db5ba3-cedf-4bbb-a754-6c51ca4b42a7" containerID="c594583f26f1a7905e8575575884170a039c243b84a9ae6093eeeffb24859d2c" exitCode=0 Dec 11 10:17:23 crc kubenswrapper[4881]: I1211 10:17:23.808289 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rwlhh/must-gather-n5brg" event={"ID":"44db5ba3-cedf-4bbb-a754-6c51ca4b42a7","Type":"ContainerDied","Data":"c594583f26f1a7905e8575575884170a039c243b84a9ae6093eeeffb24859d2c"} Dec 11 10:17:23 crc kubenswrapper[4881]: I1211 10:17:23.809210 4881 scope.go:117] "RemoveContainer" containerID="c594583f26f1a7905e8575575884170a039c243b84a9ae6093eeeffb24859d2c" Dec 11 10:17:24 crc kubenswrapper[4881]: I1211 10:17:24.014855 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-rwlhh_must-gather-n5brg_44db5ba3-cedf-4bbb-a754-6c51ca4b42a7/gather/0.log" Dec 
11 10:17:25 crc kubenswrapper[4881]: I1211 10:17:25.006101 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:17:25 crc kubenswrapper[4881]: E1211 10:17:25.006842 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:17:25 crc kubenswrapper[4881]: I1211 10:17:25.958577 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-76rn7"] Dec 11 10:17:25 crc kubenswrapper[4881]: I1211 10:17:25.961648 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-76rn7" Dec 11 10:17:25 crc kubenswrapper[4881]: I1211 10:17:25.975177 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-76rn7"] Dec 11 10:17:26 crc kubenswrapper[4881]: I1211 10:17:26.043103 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-utilities\") pod \"redhat-operators-76rn7\" (UID: \"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2\") " pod="openshift-marketplace/redhat-operators-76rn7" Dec 11 10:17:26 crc kubenswrapper[4881]: I1211 10:17:26.043230 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bl9tw\" (UniqueName: \"kubernetes.io/projected/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-kube-api-access-bl9tw\") pod \"redhat-operators-76rn7\" (UID: \"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2\") " pod="openshift-marketplace/redhat-operators-76rn7" Dec 11 10:17:26 crc kubenswrapper[4881]: I1211 10:17:26.043397 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-catalog-content\") pod \"redhat-operators-76rn7\" (UID: \"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2\") " pod="openshift-marketplace/redhat-operators-76rn7" Dec 11 10:17:26 crc kubenswrapper[4881]: I1211 10:17:26.149182 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-utilities\") pod \"redhat-operators-76rn7\" (UID: \"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2\") " pod="openshift-marketplace/redhat-operators-76rn7" Dec 11 10:17:26 crc kubenswrapper[4881]: I1211 10:17:26.162429 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bl9tw\" (UniqueName: \"kubernetes.io/projected/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-kube-api-access-bl9tw\") pod \"redhat-operators-76rn7\" (UID: \"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2\") " pod="openshift-marketplace/redhat-operators-76rn7" Dec 11 10:17:26 crc kubenswrapper[4881]: I1211 10:17:26.151431 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-utilities\") pod \"redhat-operators-76rn7\" (UID: \"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2\") " 
pod="openshift-marketplace/redhat-operators-76rn7" Dec 11 10:17:26 crc kubenswrapper[4881]: I1211 10:17:26.163115 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-catalog-content\") pod \"redhat-operators-76rn7\" (UID: \"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2\") " pod="openshift-marketplace/redhat-operators-76rn7" Dec 11 10:17:26 crc kubenswrapper[4881]: I1211 10:17:26.164256 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-catalog-content\") pod \"redhat-operators-76rn7\" (UID: \"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2\") " pod="openshift-marketplace/redhat-operators-76rn7" Dec 11 10:17:26 crc kubenswrapper[4881]: I1211 10:17:26.189440 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bl9tw\" (UniqueName: \"kubernetes.io/projected/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-kube-api-access-bl9tw\") pod \"redhat-operators-76rn7\" (UID: \"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2\") " pod="openshift-marketplace/redhat-operators-76rn7" Dec 11 10:17:26 crc kubenswrapper[4881]: I1211 10:17:26.288111 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-76rn7" Dec 11 10:17:29 crc kubenswrapper[4881]: W1211 10:17:29.477848 4881 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod38f011f9_bf6e_40d3_bfd5_e2d7ddba4df2.slice/crio-d1379708c3c89de3531f3d8c86222eeee27e19f1e77e715027f1e8fcf41716df WatchSource:0}: Error finding container d1379708c3c89de3531f3d8c86222eeee27e19f1e77e715027f1e8fcf41716df: Status 404 returned error can't find the container with id d1379708c3c89de3531f3d8c86222eeee27e19f1e77e715027f1e8fcf41716df Dec 11 10:17:29 crc kubenswrapper[4881]: I1211 10:17:29.480959 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-76rn7"] Dec 11 10:17:29 crc kubenswrapper[4881]: I1211 10:17:29.880745 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-76rn7" event={"ID":"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2","Type":"ContainerStarted","Data":"d1379708c3c89de3531f3d8c86222eeee27e19f1e77e715027f1e8fcf41716df"} Dec 11 10:17:29 crc kubenswrapper[4881]: I1211 10:17:29.887223 4881 generic.go:334] "Generic (PLEG): container finished" podID="20d03676-bb0d-476e-9ca0-ca11c3f4fe24" containerID="ffce42d434b3daad2a093c5cd6056d8d72c8f256baff23ad88d9fe5fc3241ee3" exitCode=0 Dec 11 10:17:29 crc kubenswrapper[4881]: I1211 10:17:29.887294 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zhczh" event={"ID":"20d03676-bb0d-476e-9ca0-ca11c3f4fe24","Type":"ContainerDied","Data":"ffce42d434b3daad2a093c5cd6056d8d72c8f256baff23ad88d9fe5fc3241ee3"} Dec 11 10:17:30 crc kubenswrapper[4881]: I1211 10:17:30.902809 4881 generic.go:334] "Generic (PLEG): container finished" podID="38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2" containerID="4f77b4e65c0edd4f0f00088bb97190fe798f9125d884cddfc56ddc0ce4586062" exitCode=0 Dec 11 10:17:30 crc kubenswrapper[4881]: I1211 10:17:30.902892 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-76rn7" 
event={"ID":"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2","Type":"ContainerDied","Data":"4f77b4e65c0edd4f0f00088bb97190fe798f9125d884cddfc56ddc0ce4586062"} Dec 11 10:17:31 crc kubenswrapper[4881]: I1211 10:17:31.914683 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zhczh" event={"ID":"20d03676-bb0d-476e-9ca0-ca11c3f4fe24","Type":"ContainerStarted","Data":"6f01a616d7eff5a0639b91126ea22924de66f294305c662cfe59100c92ceda22"} Dec 11 10:17:31 crc kubenswrapper[4881]: I1211 10:17:31.940903 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zhczh" podStartSLOduration=3.003886473 podStartE2EDuration="10.940885739s" podCreationTimestamp="2025-12-11 10:17:21 +0000 UTC" firstStartedPulling="2025-12-11 10:17:22.798313577 +0000 UTC m=+7291.175682284" lastFinishedPulling="2025-12-11 10:17:30.735312853 +0000 UTC m=+7299.112681550" observedRunningTime="2025-12-11 10:17:31.932095263 +0000 UTC m=+7300.309463960" watchObservedRunningTime="2025-12-11 10:17:31.940885739 +0000 UTC m=+7300.318254436" Dec 11 10:17:32 crc kubenswrapper[4881]: I1211 10:17:32.931184 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-76rn7" event={"ID":"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2","Type":"ContainerStarted","Data":"fa0224907cc3b49455a9f648944cad7c6828e9174360188e78dd7198c1425a9f"} Dec 11 10:17:35 crc kubenswrapper[4881]: I1211 10:17:35.993309 4881 generic.go:334] "Generic (PLEG): container finished" podID="38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2" containerID="fa0224907cc3b49455a9f648944cad7c6828e9174360188e78dd7198c1425a9f" exitCode=0 Dec 11 10:17:35 crc kubenswrapper[4881]: I1211 10:17:35.993465 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-76rn7" event={"ID":"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2","Type":"ContainerDied","Data":"fa0224907cc3b49455a9f648944cad7c6828e9174360188e78dd7198c1425a9f"} Dec 11 10:17:37 crc kubenswrapper[4881]: I1211 10:17:37.025450 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-76rn7" event={"ID":"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2","Type":"ContainerStarted","Data":"c1e65aac80e30ac8cc001271214715ec22c3225f0d2f7b21c8c5b789224af7a7"} Dec 11 10:17:37 crc kubenswrapper[4881]: I1211 10:17:37.062758 4881 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-76rn7" podStartSLOduration=6.554781963 podStartE2EDuration="12.062731597s" podCreationTimestamp="2025-12-11 10:17:25 +0000 UTC" firstStartedPulling="2025-12-11 10:17:30.905549336 +0000 UTC m=+7299.282918053" lastFinishedPulling="2025-12-11 10:17:36.41349899 +0000 UTC m=+7304.790867687" observedRunningTime="2025-12-11 10:17:37.048185069 +0000 UTC m=+7305.425553776" watchObservedRunningTime="2025-12-11 10:17:37.062731597 +0000 UTC m=+7305.440100304" Dec 11 10:17:39 crc kubenswrapper[4881]: I1211 10:17:39.006036 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:17:39 crc kubenswrapper[4881]: E1211 10:17:39.007107 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:17:40 crc kubenswrapper[4881]: I1211 10:17:40.962095 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rwlhh/must-gather-n5brg"] Dec 11 10:17:40 crc kubenswrapper[4881]: I1211 10:17:40.962785 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-rwlhh/must-gather-n5brg" podUID="44db5ba3-cedf-4bbb-a754-6c51ca4b42a7" containerName="copy" containerID="cri-o://aeaf4cc765f3edd2da56fbbb122adad53d5f4346c6bc8090f62732e8deb96834" gracePeriod=2 Dec 11 10:17:40 crc kubenswrapper[4881]: I1211 10:17:40.978940 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rwlhh/must-gather-n5brg"] Dec 11 10:17:41 crc kubenswrapper[4881]: I1211 10:17:41.444029 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-rwlhh_must-gather-n5brg_44db5ba3-cedf-4bbb-a754-6c51ca4b42a7/copy/0.log" Dec 11 10:17:41 crc kubenswrapper[4881]: I1211 10:17:41.445248 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rwlhh/must-gather-n5brg" Dec 11 10:17:41 crc kubenswrapper[4881]: I1211 10:17:41.531690 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/44db5ba3-cedf-4bbb-a754-6c51ca4b42a7-must-gather-output\") pod \"44db5ba3-cedf-4bbb-a754-6c51ca4b42a7\" (UID: \"44db5ba3-cedf-4bbb-a754-6c51ca4b42a7\") " Dec 11 10:17:41 crc kubenswrapper[4881]: I1211 10:17:41.531805 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qmj9k\" (UniqueName: \"kubernetes.io/projected/44db5ba3-cedf-4bbb-a754-6c51ca4b42a7-kube-api-access-qmj9k\") pod \"44db5ba3-cedf-4bbb-a754-6c51ca4b42a7\" (UID: \"44db5ba3-cedf-4bbb-a754-6c51ca4b42a7\") " Dec 11 10:17:41 crc kubenswrapper[4881]: I1211 10:17:41.540071 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44db5ba3-cedf-4bbb-a754-6c51ca4b42a7-kube-api-access-qmj9k" (OuterVolumeSpecName: "kube-api-access-qmj9k") pod "44db5ba3-cedf-4bbb-a754-6c51ca4b42a7" (UID: "44db5ba3-cedf-4bbb-a754-6c51ca4b42a7"). InnerVolumeSpecName "kube-api-access-qmj9k". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:17:41 crc kubenswrapper[4881]: I1211 10:17:41.635411 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qmj9k\" (UniqueName: \"kubernetes.io/projected/44db5ba3-cedf-4bbb-a754-6c51ca4b42a7-kube-api-access-qmj9k\") on node \"crc\" DevicePath \"\"" Dec 11 10:17:41 crc kubenswrapper[4881]: I1211 10:17:41.679772 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44db5ba3-cedf-4bbb-a754-6c51ca4b42a7-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "44db5ba3-cedf-4bbb-a754-6c51ca4b42a7" (UID: "44db5ba3-cedf-4bbb-a754-6c51ca4b42a7"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:17:41 crc kubenswrapper[4881]: I1211 10:17:41.737841 4881 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/44db5ba3-cedf-4bbb-a754-6c51ca4b42a7-must-gather-output\") on node \"crc\" DevicePath \"\"" Dec 11 10:17:41 crc kubenswrapper[4881]: I1211 10:17:41.881215 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zhczh" Dec 11 10:17:41 crc kubenswrapper[4881]: I1211 10:17:41.884949 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zhczh" Dec 11 10:17:41 crc kubenswrapper[4881]: I1211 10:17:41.937760 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zhczh" Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.062130 4881 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-rwlhh_must-gather-n5brg_44db5ba3-cedf-4bbb-a754-6c51ca4b42a7/copy/0.log" Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.062606 4881 generic.go:334] "Generic (PLEG): container finished" podID="44db5ba3-cedf-4bbb-a754-6c51ca4b42a7" containerID="aeaf4cc765f3edd2da56fbbb122adad53d5f4346c6bc8090f62732e8deb96834" exitCode=143 Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.062667 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rwlhh/must-gather-n5brg" Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.062724 4881 scope.go:117] "RemoveContainer" containerID="aeaf4cc765f3edd2da56fbbb122adad53d5f4346c6bc8090f62732e8deb96834" Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.086641 4881 scope.go:117] "RemoveContainer" containerID="c594583f26f1a7905e8575575884170a039c243b84a9ae6093eeeffb24859d2c" Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.133250 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zhczh" Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.158600 4881 scope.go:117] "RemoveContainer" containerID="aeaf4cc765f3edd2da56fbbb122adad53d5f4346c6bc8090f62732e8deb96834" Dec 11 10:17:42 crc kubenswrapper[4881]: E1211 10:17:42.159115 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aeaf4cc765f3edd2da56fbbb122adad53d5f4346c6bc8090f62732e8deb96834\": container with ID starting with aeaf4cc765f3edd2da56fbbb122adad53d5f4346c6bc8090f62732e8deb96834 not found: ID does not exist" containerID="aeaf4cc765f3edd2da56fbbb122adad53d5f4346c6bc8090f62732e8deb96834" Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.159149 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aeaf4cc765f3edd2da56fbbb122adad53d5f4346c6bc8090f62732e8deb96834"} err="failed to get container status \"aeaf4cc765f3edd2da56fbbb122adad53d5f4346c6bc8090f62732e8deb96834\": rpc error: code = NotFound desc = could not find container \"aeaf4cc765f3edd2da56fbbb122adad53d5f4346c6bc8090f62732e8deb96834\": container with ID starting with aeaf4cc765f3edd2da56fbbb122adad53d5f4346c6bc8090f62732e8deb96834 not found: ID does not exist" Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.159170 4881 scope.go:117] "RemoveContainer" containerID="c594583f26f1a7905e8575575884170a039c243b84a9ae6093eeeffb24859d2c" Dec 11 
10:17:42 crc kubenswrapper[4881]: E1211 10:17:42.159584 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c594583f26f1a7905e8575575884170a039c243b84a9ae6093eeeffb24859d2c\": container with ID starting with c594583f26f1a7905e8575575884170a039c243b84a9ae6093eeeffb24859d2c not found: ID does not exist" containerID="c594583f26f1a7905e8575575884170a039c243b84a9ae6093eeeffb24859d2c" Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.159642 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c594583f26f1a7905e8575575884170a039c243b84a9ae6093eeeffb24859d2c"} err="failed to get container status \"c594583f26f1a7905e8575575884170a039c243b84a9ae6093eeeffb24859d2c\": rpc error: code = NotFound desc = could not find container \"c594583f26f1a7905e8575575884170a039c243b84a9ae6093eeeffb24859d2c\": container with ID starting with c594583f26f1a7905e8575575884170a039c243b84a9ae6093eeeffb24859d2c not found: ID does not exist" Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.257159 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zhczh"] Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.312159 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qnffx"] Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.312475 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qnffx" podUID="18611892-d199-4d6c-a3b6-391c8c78511c" containerName="registry-server" containerID="cri-o://27dbf9ee9d832288c0d25cfa1054884dbfa1cae929ee7bcdc2323aea0ea009e4" gracePeriod=2 Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.873000 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qnffx" Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.881697 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lwv6k\" (UniqueName: \"kubernetes.io/projected/18611892-d199-4d6c-a3b6-391c8c78511c-kube-api-access-lwv6k\") pod \"18611892-d199-4d6c-a3b6-391c8c78511c\" (UID: \"18611892-d199-4d6c-a3b6-391c8c78511c\") " Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.881840 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18611892-d199-4d6c-a3b6-391c8c78511c-utilities\") pod \"18611892-d199-4d6c-a3b6-391c8c78511c\" (UID: \"18611892-d199-4d6c-a3b6-391c8c78511c\") " Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.882029 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18611892-d199-4d6c-a3b6-391c8c78511c-catalog-content\") pod \"18611892-d199-4d6c-a3b6-391c8c78511c\" (UID: \"18611892-d199-4d6c-a3b6-391c8c78511c\") " Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.882697 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/18611892-d199-4d6c-a3b6-391c8c78511c-utilities" (OuterVolumeSpecName: "utilities") pod "18611892-d199-4d6c-a3b6-391c8c78511c" (UID: "18611892-d199-4d6c-a3b6-391c8c78511c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.883804 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18611892-d199-4d6c-a3b6-391c8c78511c-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.892016 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18611892-d199-4d6c-a3b6-391c8c78511c-kube-api-access-lwv6k" (OuterVolumeSpecName: "kube-api-access-lwv6k") pod "18611892-d199-4d6c-a3b6-391c8c78511c" (UID: "18611892-d199-4d6c-a3b6-391c8c78511c"). InnerVolumeSpecName "kube-api-access-lwv6k". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.967970 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/18611892-d199-4d6c-a3b6-391c8c78511c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "18611892-d199-4d6c-a3b6-391c8c78511c" (UID: "18611892-d199-4d6c-a3b6-391c8c78511c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.986597 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18611892-d199-4d6c-a3b6-391c8c78511c-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 10:17:42 crc kubenswrapper[4881]: I1211 10:17:42.986901 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lwv6k\" (UniqueName: \"kubernetes.io/projected/18611892-d199-4d6c-a3b6-391c8c78511c-kube-api-access-lwv6k\") on node \"crc\" DevicePath \"\"" Dec 11 10:17:43 crc kubenswrapper[4881]: I1211 10:17:43.043294 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44db5ba3-cedf-4bbb-a754-6c51ca4b42a7" path="/var/lib/kubelet/pods/44db5ba3-cedf-4bbb-a754-6c51ca4b42a7/volumes" Dec 11 10:17:43 crc kubenswrapper[4881]: I1211 10:17:43.110038 4881 generic.go:334] "Generic (PLEG): container finished" podID="18611892-d199-4d6c-a3b6-391c8c78511c" containerID="27dbf9ee9d832288c0d25cfa1054884dbfa1cae929ee7bcdc2323aea0ea009e4" exitCode=0 Dec 11 10:17:43 crc kubenswrapper[4881]: I1211 10:17:43.111275 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qnffx" Dec 11 10:17:43 crc kubenswrapper[4881]: I1211 10:17:43.111453 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnffx" event={"ID":"18611892-d199-4d6c-a3b6-391c8c78511c","Type":"ContainerDied","Data":"27dbf9ee9d832288c0d25cfa1054884dbfa1cae929ee7bcdc2323aea0ea009e4"} Dec 11 10:17:43 crc kubenswrapper[4881]: I1211 10:17:43.112037 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnffx" event={"ID":"18611892-d199-4d6c-a3b6-391c8c78511c","Type":"ContainerDied","Data":"4e32154deaa9969ad013f87943862b0e043cec2d2253c929b574a99fc05417e2"} Dec 11 10:17:43 crc kubenswrapper[4881]: I1211 10:17:43.112057 4881 scope.go:117] "RemoveContainer" containerID="27dbf9ee9d832288c0d25cfa1054884dbfa1cae929ee7bcdc2323aea0ea009e4" Dec 11 10:17:43 crc kubenswrapper[4881]: I1211 10:17:43.148811 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qnffx"] Dec 11 10:17:43 crc kubenswrapper[4881]: I1211 10:17:43.169067 4881 scope.go:117] "RemoveContainer" containerID="856ac1a98e884829e1e4342f1291bdf1644fa7f56aea003a15f4b3759e226818" Dec 11 10:17:43 crc kubenswrapper[4881]: I1211 10:17:43.174505 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qnffx"] Dec 11 10:17:43 crc kubenswrapper[4881]: I1211 10:17:43.207543 4881 scope.go:117] "RemoveContainer" containerID="3f3bad3e4c68da49042cc569b5837d7a86ec6647a796869ab3b5a4796a6b42ab" Dec 11 10:17:43 crc kubenswrapper[4881]: I1211 10:17:43.265592 4881 scope.go:117] "RemoveContainer" containerID="27dbf9ee9d832288c0d25cfa1054884dbfa1cae929ee7bcdc2323aea0ea009e4" Dec 11 10:17:43 crc kubenswrapper[4881]: E1211 10:17:43.266211 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27dbf9ee9d832288c0d25cfa1054884dbfa1cae929ee7bcdc2323aea0ea009e4\": container with ID starting with 27dbf9ee9d832288c0d25cfa1054884dbfa1cae929ee7bcdc2323aea0ea009e4 not found: ID does not exist" containerID="27dbf9ee9d832288c0d25cfa1054884dbfa1cae929ee7bcdc2323aea0ea009e4" Dec 11 10:17:43 crc kubenswrapper[4881]: I1211 10:17:43.266258 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27dbf9ee9d832288c0d25cfa1054884dbfa1cae929ee7bcdc2323aea0ea009e4"} err="failed to get container status \"27dbf9ee9d832288c0d25cfa1054884dbfa1cae929ee7bcdc2323aea0ea009e4\": rpc error: code = NotFound desc = could not find container \"27dbf9ee9d832288c0d25cfa1054884dbfa1cae929ee7bcdc2323aea0ea009e4\": container with ID starting with 27dbf9ee9d832288c0d25cfa1054884dbfa1cae929ee7bcdc2323aea0ea009e4 not found: ID does not exist" Dec 11 10:17:43 crc kubenswrapper[4881]: I1211 10:17:43.266284 4881 scope.go:117] "RemoveContainer" containerID="856ac1a98e884829e1e4342f1291bdf1644fa7f56aea003a15f4b3759e226818" Dec 11 10:17:43 crc kubenswrapper[4881]: E1211 10:17:43.266618 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"856ac1a98e884829e1e4342f1291bdf1644fa7f56aea003a15f4b3759e226818\": container with ID starting with 856ac1a98e884829e1e4342f1291bdf1644fa7f56aea003a15f4b3759e226818 not found: ID does not exist" containerID="856ac1a98e884829e1e4342f1291bdf1644fa7f56aea003a15f4b3759e226818" Dec 11 10:17:43 crc kubenswrapper[4881]: I1211 10:17:43.266642 4881 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"856ac1a98e884829e1e4342f1291bdf1644fa7f56aea003a15f4b3759e226818"} err="failed to get container status \"856ac1a98e884829e1e4342f1291bdf1644fa7f56aea003a15f4b3759e226818\": rpc error: code = NotFound desc = could not find container \"856ac1a98e884829e1e4342f1291bdf1644fa7f56aea003a15f4b3759e226818\": container with ID starting with 856ac1a98e884829e1e4342f1291bdf1644fa7f56aea003a15f4b3759e226818 not found: ID does not exist" Dec 11 10:17:43 crc kubenswrapper[4881]: I1211 10:17:43.266657 4881 scope.go:117] "RemoveContainer" containerID="3f3bad3e4c68da49042cc569b5837d7a86ec6647a796869ab3b5a4796a6b42ab" Dec 11 10:17:43 crc kubenswrapper[4881]: E1211 10:17:43.267480 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f3bad3e4c68da49042cc569b5837d7a86ec6647a796869ab3b5a4796a6b42ab\": container with ID starting with 3f3bad3e4c68da49042cc569b5837d7a86ec6647a796869ab3b5a4796a6b42ab not found: ID does not exist" containerID="3f3bad3e4c68da49042cc569b5837d7a86ec6647a796869ab3b5a4796a6b42ab" Dec 11 10:17:43 crc kubenswrapper[4881]: I1211 10:17:43.267499 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f3bad3e4c68da49042cc569b5837d7a86ec6647a796869ab3b5a4796a6b42ab"} err="failed to get container status \"3f3bad3e4c68da49042cc569b5837d7a86ec6647a796869ab3b5a4796a6b42ab\": rpc error: code = NotFound desc = could not find container \"3f3bad3e4c68da49042cc569b5837d7a86ec6647a796869ab3b5a4796a6b42ab\": container with ID starting with 3f3bad3e4c68da49042cc569b5837d7a86ec6647a796869ab3b5a4796a6b42ab not found: ID does not exist" Dec 11 10:17:45 crc kubenswrapper[4881]: I1211 10:17:45.019844 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18611892-d199-4d6c-a3b6-391c8c78511c" path="/var/lib/kubelet/pods/18611892-d199-4d6c-a3b6-391c8c78511c/volumes" Dec 11 10:17:46 crc kubenswrapper[4881]: I1211 10:17:46.288837 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-76rn7" Dec 11 10:17:46 crc kubenswrapper[4881]: I1211 10:17:46.289205 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-76rn7" Dec 11 10:17:47 crc kubenswrapper[4881]: I1211 10:17:47.351176 4881 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-76rn7" podUID="38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2" containerName="registry-server" probeResult="failure" output=< Dec 11 10:17:47 crc kubenswrapper[4881]: timeout: failed to connect service ":50051" within 1s Dec 11 10:17:47 crc kubenswrapper[4881]: > Dec 11 10:17:53 crc kubenswrapper[4881]: I1211 10:17:53.015302 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:17:53 crc kubenswrapper[4881]: E1211 10:17:53.017157 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:17:56 crc kubenswrapper[4881]: I1211 10:17:56.387037 4881 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-76rn7" Dec 11 10:17:56 crc kubenswrapper[4881]: I1211 10:17:56.444618 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-76rn7" Dec 11 10:17:57 crc kubenswrapper[4881]: I1211 10:17:57.147636 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-76rn7"] Dec 11 10:17:58 crc kubenswrapper[4881]: I1211 10:17:58.347542 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-76rn7" podUID="38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2" containerName="registry-server" containerID="cri-o://c1e65aac80e30ac8cc001271214715ec22c3225f0d2f7b21c8c5b789224af7a7" gracePeriod=2 Dec 11 10:17:58 crc kubenswrapper[4881]: E1211 10:17:58.439019 4881 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod38f011f9_bf6e_40d3_bfd5_e2d7ddba4df2.slice/crio-c1e65aac80e30ac8cc001271214715ec22c3225f0d2f7b21c8c5b789224af7a7.scope\": RecentStats: unable to find data in memory cache]" Dec 11 10:17:58 crc kubenswrapper[4881]: I1211 10:17:58.881932 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-76rn7" Dec 11 10:17:58 crc kubenswrapper[4881]: I1211 10:17:58.892475 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-catalog-content\") pod \"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2\" (UID: \"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2\") " Dec 11 10:17:58 crc kubenswrapper[4881]: I1211 10:17:58.892547 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bl9tw\" (UniqueName: \"kubernetes.io/projected/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-kube-api-access-bl9tw\") pod \"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2\" (UID: \"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2\") " Dec 11 10:17:58 crc kubenswrapper[4881]: I1211 10:17:58.892590 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-utilities\") pod \"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2\" (UID: \"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2\") " Dec 11 10:17:58 crc kubenswrapper[4881]: I1211 10:17:58.894289 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-utilities" (OuterVolumeSpecName: "utilities") pod "38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2" (UID: "38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:17:58 crc kubenswrapper[4881]: I1211 10:17:58.899115 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-kube-api-access-bl9tw" (OuterVolumeSpecName: "kube-api-access-bl9tw") pod "38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2" (UID: "38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2"). InnerVolumeSpecName "kube-api-access-bl9tw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:17:58 crc kubenswrapper[4881]: I1211 10:17:58.996094 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bl9tw\" (UniqueName: \"kubernetes.io/projected/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-kube-api-access-bl9tw\") on node \"crc\" DevicePath \"\"" Dec 11 10:17:58 crc kubenswrapper[4881]: I1211 10:17:58.996137 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 10:17:59 crc kubenswrapper[4881]: I1211 10:17:59.027510 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2" (UID: "38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:17:59 crc kubenswrapper[4881]: I1211 10:17:59.098728 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 10:17:59 crc kubenswrapper[4881]: I1211 10:17:59.389177 4881 generic.go:334] "Generic (PLEG): container finished" podID="38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2" containerID="c1e65aac80e30ac8cc001271214715ec22c3225f0d2f7b21c8c5b789224af7a7" exitCode=0 Dec 11 10:17:59 crc kubenswrapper[4881]: I1211 10:17:59.389247 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-76rn7" Dec 11 10:17:59 crc kubenswrapper[4881]: I1211 10:17:59.389267 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-76rn7" event={"ID":"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2","Type":"ContainerDied","Data":"c1e65aac80e30ac8cc001271214715ec22c3225f0d2f7b21c8c5b789224af7a7"} Dec 11 10:17:59 crc kubenswrapper[4881]: I1211 10:17:59.389717 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-76rn7" event={"ID":"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2","Type":"ContainerDied","Data":"d1379708c3c89de3531f3d8c86222eeee27e19f1e77e715027f1e8fcf41716df"} Dec 11 10:17:59 crc kubenswrapper[4881]: I1211 10:17:59.389756 4881 scope.go:117] "RemoveContainer" containerID="c1e65aac80e30ac8cc001271214715ec22c3225f0d2f7b21c8c5b789224af7a7" Dec 11 10:17:59 crc kubenswrapper[4881]: I1211 10:17:59.433694 4881 scope.go:117] "RemoveContainer" containerID="fa0224907cc3b49455a9f648944cad7c6828e9174360188e78dd7198c1425a9f" Dec 11 10:17:59 crc kubenswrapper[4881]: I1211 10:17:59.439385 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-76rn7"] Dec 11 10:17:59 crc kubenswrapper[4881]: I1211 10:17:59.450722 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-76rn7"] Dec 11 10:17:59 crc kubenswrapper[4881]: I1211 10:17:59.454689 4881 scope.go:117] "RemoveContainer" containerID="4f77b4e65c0edd4f0f00088bb97190fe798f9125d884cddfc56ddc0ce4586062" Dec 11 10:17:59 crc kubenswrapper[4881]: I1211 10:17:59.508895 4881 scope.go:117] "RemoveContainer" containerID="c1e65aac80e30ac8cc001271214715ec22c3225f0d2f7b21c8c5b789224af7a7" Dec 11 10:17:59 crc kubenswrapper[4881]: E1211 10:17:59.509486 4881 log.go:32] "ContainerStatus 
from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1e65aac80e30ac8cc001271214715ec22c3225f0d2f7b21c8c5b789224af7a7\": container with ID starting with c1e65aac80e30ac8cc001271214715ec22c3225f0d2f7b21c8c5b789224af7a7 not found: ID does not exist" containerID="c1e65aac80e30ac8cc001271214715ec22c3225f0d2f7b21c8c5b789224af7a7" Dec 11 10:17:59 crc kubenswrapper[4881]: I1211 10:17:59.509593 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1e65aac80e30ac8cc001271214715ec22c3225f0d2f7b21c8c5b789224af7a7"} err="failed to get container status \"c1e65aac80e30ac8cc001271214715ec22c3225f0d2f7b21c8c5b789224af7a7\": rpc error: code = NotFound desc = could not find container \"c1e65aac80e30ac8cc001271214715ec22c3225f0d2f7b21c8c5b789224af7a7\": container with ID starting with c1e65aac80e30ac8cc001271214715ec22c3225f0d2f7b21c8c5b789224af7a7 not found: ID does not exist" Dec 11 10:17:59 crc kubenswrapper[4881]: I1211 10:17:59.509674 4881 scope.go:117] "RemoveContainer" containerID="fa0224907cc3b49455a9f648944cad7c6828e9174360188e78dd7198c1425a9f" Dec 11 10:17:59 crc kubenswrapper[4881]: E1211 10:17:59.510173 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa0224907cc3b49455a9f648944cad7c6828e9174360188e78dd7198c1425a9f\": container with ID starting with fa0224907cc3b49455a9f648944cad7c6828e9174360188e78dd7198c1425a9f not found: ID does not exist" containerID="fa0224907cc3b49455a9f648944cad7c6828e9174360188e78dd7198c1425a9f" Dec 11 10:17:59 crc kubenswrapper[4881]: I1211 10:17:59.510228 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa0224907cc3b49455a9f648944cad7c6828e9174360188e78dd7198c1425a9f"} err="failed to get container status \"fa0224907cc3b49455a9f648944cad7c6828e9174360188e78dd7198c1425a9f\": rpc error: code = NotFound desc = could not find container \"fa0224907cc3b49455a9f648944cad7c6828e9174360188e78dd7198c1425a9f\": container with ID starting with fa0224907cc3b49455a9f648944cad7c6828e9174360188e78dd7198c1425a9f not found: ID does not exist" Dec 11 10:17:59 crc kubenswrapper[4881]: I1211 10:17:59.510266 4881 scope.go:117] "RemoveContainer" containerID="4f77b4e65c0edd4f0f00088bb97190fe798f9125d884cddfc56ddc0ce4586062" Dec 11 10:17:59 crc kubenswrapper[4881]: E1211 10:17:59.510669 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f77b4e65c0edd4f0f00088bb97190fe798f9125d884cddfc56ddc0ce4586062\": container with ID starting with 4f77b4e65c0edd4f0f00088bb97190fe798f9125d884cddfc56ddc0ce4586062 not found: ID does not exist" containerID="4f77b4e65c0edd4f0f00088bb97190fe798f9125d884cddfc56ddc0ce4586062" Dec 11 10:17:59 crc kubenswrapper[4881]: I1211 10:17:59.510761 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f77b4e65c0edd4f0f00088bb97190fe798f9125d884cddfc56ddc0ce4586062"} err="failed to get container status \"4f77b4e65c0edd4f0f00088bb97190fe798f9125d884cddfc56ddc0ce4586062\": rpc error: code = NotFound desc = could not find container \"4f77b4e65c0edd4f0f00088bb97190fe798f9125d884cddfc56ddc0ce4586062\": container with ID starting with 4f77b4e65c0edd4f0f00088bb97190fe798f9125d884cddfc56ddc0ce4586062 not found: ID does not exist" Dec 11 10:18:01 crc kubenswrapper[4881]: I1211 10:18:01.017729 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2" path="/var/lib/kubelet/pods/38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2/volumes" Dec 11 10:18:07 crc kubenswrapper[4881]: I1211 10:18:07.005943 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:18:07 crc kubenswrapper[4881]: E1211 10:18:07.006923 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:18:14 crc kubenswrapper[4881]: I1211 10:18:14.725216 4881 scope.go:117] "RemoveContainer" containerID="e08011cdbbfb2d875611109d70882818539095be8379662ae0b0ee7b1209824a" Dec 11 10:18:22 crc kubenswrapper[4881]: I1211 10:18:22.006611 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:18:22 crc kubenswrapper[4881]: E1211 10:18:22.007779 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:18:35 crc kubenswrapper[4881]: I1211 10:18:35.006053 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:18:35 crc kubenswrapper[4881]: E1211 10:18:35.006936 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:18:46 crc kubenswrapper[4881]: I1211 10:18:46.005608 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:18:46 crc kubenswrapper[4881]: E1211 10:18:46.006456 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:18:57 crc kubenswrapper[4881]: I1211 10:18:57.006356 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:18:57 crc kubenswrapper[4881]: E1211 10:18:57.007291 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:19:12 crc kubenswrapper[4881]: I1211 10:19:12.006029 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:19:12 crc kubenswrapper[4881]: E1211 10:19:12.008290 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:19:26 crc kubenswrapper[4881]: I1211 10:19:26.005469 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:19:26 crc kubenswrapper[4881]: E1211 10:19:26.006299 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:19:41 crc kubenswrapper[4881]: I1211 10:19:41.006135 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:19:41 crc kubenswrapper[4881]: E1211 10:19:41.007019 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:19:54 crc kubenswrapper[4881]: I1211 10:19:54.005811 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:19:54 crc kubenswrapper[4881]: E1211 10:19:54.006688 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:20:09 crc kubenswrapper[4881]: I1211 10:20:09.006175 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:20:09 crc kubenswrapper[4881]: E1211 10:20:09.006992 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" 
podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:20:22 crc kubenswrapper[4881]: I1211 10:20:22.007062 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:20:22 crc kubenswrapper[4881]: E1211 10:20:22.008372 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:20:35 crc kubenswrapper[4881]: I1211 10:20:35.005586 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:20:35 crc kubenswrapper[4881]: E1211 10:20:35.006709 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.640082 4881 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wknqh"] Dec 11 10:20:46 crc kubenswrapper[4881]: E1211 10:20:46.641805 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44db5ba3-cedf-4bbb-a754-6c51ca4b42a7" containerName="gather" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.641834 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="44db5ba3-cedf-4bbb-a754-6c51ca4b42a7" containerName="gather" Dec 11 10:20:46 crc kubenswrapper[4881]: E1211 10:20:46.641860 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2" containerName="registry-server" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.641876 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2" containerName="registry-server" Dec 11 10:20:46 crc kubenswrapper[4881]: E1211 10:20:46.641918 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18611892-d199-4d6c-a3b6-391c8c78511c" containerName="extract-content" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.641934 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="18611892-d199-4d6c-a3b6-391c8c78511c" containerName="extract-content" Dec 11 10:20:46 crc kubenswrapper[4881]: E1211 10:20:46.641963 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2" containerName="extract-content" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.641976 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2" containerName="extract-content" Dec 11 10:20:46 crc kubenswrapper[4881]: E1211 10:20:46.642028 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44db5ba3-cedf-4bbb-a754-6c51ca4b42a7" containerName="copy" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.642040 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="44db5ba3-cedf-4bbb-a754-6c51ca4b42a7" containerName="copy" Dec 11 10:20:46 crc kubenswrapper[4881]: E1211 
10:20:46.642099 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18611892-d199-4d6c-a3b6-391c8c78511c" containerName="registry-server" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.642127 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="18611892-d199-4d6c-a3b6-391c8c78511c" containerName="registry-server" Dec 11 10:20:46 crc kubenswrapper[4881]: E1211 10:20:46.642159 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2" containerName="extract-utilities" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.642173 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2" containerName="extract-utilities" Dec 11 10:20:46 crc kubenswrapper[4881]: E1211 10:20:46.642214 4881 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18611892-d199-4d6c-a3b6-391c8c78511c" containerName="extract-utilities" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.642227 4881 state_mem.go:107] "Deleted CPUSet assignment" podUID="18611892-d199-4d6c-a3b6-391c8c78511c" containerName="extract-utilities" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.642832 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="44db5ba3-cedf-4bbb-a754-6c51ca4b42a7" containerName="copy" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.642884 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="44db5ba3-cedf-4bbb-a754-6c51ca4b42a7" containerName="gather" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.642935 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="18611892-d199-4d6c-a3b6-391c8c78511c" containerName="registry-server" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.642965 4881 memory_manager.go:354] "RemoveStaleState removing state" podUID="38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2" containerName="registry-server" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.646627 4881 util.go:30] "No sandbox for pod can be found. 
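Need to start a new one" pod="openshift-marketplace/community-operators-wknqh"

[Editor's note] The cpu_manager.go:410 and memory_manager.go:354 records above show the kubelet's resource managers dropping CPUSet and memory-state entries recorded for containers of pods that no longer exist, just before admitting the new community-operators-wknqh pod. A simplified, hypothetical sketch of that cleanup follows; the key type and string-valued assignment map are illustrative stand-ins, not the kubelet's internal state types.

package main

import "fmt"

// key identifies a container the way the state entries above do: by pod UID
// and container name.
type key struct {
	podUID, container string
}

// removeStaleState deletes assignments recorded for containers that are no
// longer active, matching the "RemoveStaleState: removing container" and
// "Deleted CPUSet assignment" pairs in the log.
func removeStaleState(assignments map[key]string, active map[key]bool) {
	for k := range assignments {
		if !active[k] {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n",
				k.podUID, k.container)
			delete(assignments, k)
		}
	}
}

func main() {
	assignments := map[key]string{
		{"38f011f9-bf6e-40d3-bfd5-e2d7ddba4df2", "registry-server"}: "cpus 0-3",
	}
	removeStaleState(assignments, map[key]bool{}) // no matching active containers remain
}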
Need to start a new one" pod="openshift-marketplace/community-operators-wknqh" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.668824 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wknqh"] Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.720709 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5657cd7-8da4-4285-b335-2a21f96bf14d-catalog-content\") pod \"community-operators-wknqh\" (UID: \"b5657cd7-8da4-4285-b335-2a21f96bf14d\") " pod="openshift-marketplace/community-operators-wknqh" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.720995 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkzpk\" (UniqueName: \"kubernetes.io/projected/b5657cd7-8da4-4285-b335-2a21f96bf14d-kube-api-access-mkzpk\") pod \"community-operators-wknqh\" (UID: \"b5657cd7-8da4-4285-b335-2a21f96bf14d\") " pod="openshift-marketplace/community-operators-wknqh" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.721314 4881 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5657cd7-8da4-4285-b335-2a21f96bf14d-utilities\") pod \"community-operators-wknqh\" (UID: \"b5657cd7-8da4-4285-b335-2a21f96bf14d\") " pod="openshift-marketplace/community-operators-wknqh" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.824331 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5657cd7-8da4-4285-b335-2a21f96bf14d-utilities\") pod \"community-operators-wknqh\" (UID: \"b5657cd7-8da4-4285-b335-2a21f96bf14d\") " pod="openshift-marketplace/community-operators-wknqh" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.824452 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5657cd7-8da4-4285-b335-2a21f96bf14d-catalog-content\") pod \"community-operators-wknqh\" (UID: \"b5657cd7-8da4-4285-b335-2a21f96bf14d\") " pod="openshift-marketplace/community-operators-wknqh" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.824565 4881 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkzpk\" (UniqueName: \"kubernetes.io/projected/b5657cd7-8da4-4285-b335-2a21f96bf14d-kube-api-access-mkzpk\") pod \"community-operators-wknqh\" (UID: \"b5657cd7-8da4-4285-b335-2a21f96bf14d\") " pod="openshift-marketplace/community-operators-wknqh" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.825243 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5657cd7-8da4-4285-b335-2a21f96bf14d-utilities\") pod \"community-operators-wknqh\" (UID: \"b5657cd7-8da4-4285-b335-2a21f96bf14d\") " pod="openshift-marketplace/community-operators-wknqh" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.825528 4881 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5657cd7-8da4-4285-b335-2a21f96bf14d-catalog-content\") pod \"community-operators-wknqh\" (UID: \"b5657cd7-8da4-4285-b335-2a21f96bf14d\") " pod="openshift-marketplace/community-operators-wknqh" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.850473 4881 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-mkzpk\" (UniqueName: \"kubernetes.io/projected/b5657cd7-8da4-4285-b335-2a21f96bf14d-kube-api-access-mkzpk\") pod \"community-operators-wknqh\" (UID: \"b5657cd7-8da4-4285-b335-2a21f96bf14d\") " pod="openshift-marketplace/community-operators-wknqh" Dec 11 10:20:46 crc kubenswrapper[4881]: I1211 10:20:46.983265 4881 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wknqh" Dec 11 10:20:47 crc kubenswrapper[4881]: I1211 10:20:47.653878 4881 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wknqh"] Dec 11 10:20:48 crc kubenswrapper[4881]: I1211 10:20:48.492023 4881 generic.go:334] "Generic (PLEG): container finished" podID="b5657cd7-8da4-4285-b335-2a21f96bf14d" containerID="98044355d8d9bf0f9abe43edaf8786f54e23de9aa48ef5eaa50795a1beecc817" exitCode=0 Dec 11 10:20:48 crc kubenswrapper[4881]: I1211 10:20:48.492223 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wknqh" event={"ID":"b5657cd7-8da4-4285-b335-2a21f96bf14d","Type":"ContainerDied","Data":"98044355d8d9bf0f9abe43edaf8786f54e23de9aa48ef5eaa50795a1beecc817"} Dec 11 10:20:48 crc kubenswrapper[4881]: I1211 10:20:48.492408 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wknqh" event={"ID":"b5657cd7-8da4-4285-b335-2a21f96bf14d","Type":"ContainerStarted","Data":"ae3058d5e49ee87fa3b0201dfa3f26e8290a85059a53bca40d86db84b30b2fbc"} Dec 11 10:20:49 crc kubenswrapper[4881]: I1211 10:20:49.508901 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wknqh" event={"ID":"b5657cd7-8da4-4285-b335-2a21f96bf14d","Type":"ContainerStarted","Data":"897a9d01f0ab32be37170daea611b41b1f4dbddecbe414ba6e78a19efce96199"} Dec 11 10:20:50 crc kubenswrapper[4881]: I1211 10:20:50.006180 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:20:50 crc kubenswrapper[4881]: E1211 10:20:50.006531 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:20:50 crc kubenswrapper[4881]: I1211 10:20:50.522248 4881 generic.go:334] "Generic (PLEG): container finished" podID="b5657cd7-8da4-4285-b335-2a21f96bf14d" containerID="897a9d01f0ab32be37170daea611b41b1f4dbddecbe414ba6e78a19efce96199" exitCode=0 Dec 11 10:20:50 crc kubenswrapper[4881]: I1211 10:20:50.522289 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wknqh" event={"ID":"b5657cd7-8da4-4285-b335-2a21f96bf14d","Type":"ContainerDied","Data":"897a9d01f0ab32be37170daea611b41b1f4dbddecbe414ba6e78a19efce96199"} Dec 11 10:20:52 crc kubenswrapper[4881]: I1211 10:20:52.549271 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wknqh" event={"ID":"b5657cd7-8da4-4285-b335-2a21f96bf14d","Type":"ContainerStarted","Data":"fa8014e05154430b763802e74e0d06d8f1611d8b730ec2f4802dea2e00d040c6"} Dec 11 10:20:52 crc kubenswrapper[4881]: I1211 10:20:52.578620 4881 
Dec 11 10:20:56 crc kubenswrapper[4881]: I1211 10:20:56.983937 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wknqh"
Dec 11 10:20:56 crc kubenswrapper[4881]: I1211 10:20:56.984631 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wknqh"
Dec 11 10:20:57 crc kubenswrapper[4881]: I1211 10:20:57.038861 4881 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wknqh"
Dec 11 10:20:57 crc kubenswrapper[4881]: I1211 10:20:57.667983 4881 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wknqh"
Dec 11 10:20:57 crc kubenswrapper[4881]: I1211 10:20:57.726396 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wknqh"]
Dec 11 10:20:59 crc kubenswrapper[4881]: I1211 10:20:59.627760 4881 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wknqh" podUID="b5657cd7-8da4-4285-b335-2a21f96bf14d" containerName="registry-server" containerID="cri-o://fa8014e05154430b763802e74e0d06d8f1611d8b730ec2f4802dea2e00d040c6" gracePeriod=2
Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.381780 4881 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wknqh"
Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.497840 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkzpk\" (UniqueName: \"kubernetes.io/projected/b5657cd7-8da4-4285-b335-2a21f96bf14d-kube-api-access-mkzpk\") pod \"b5657cd7-8da4-4285-b335-2a21f96bf14d\" (UID: \"b5657cd7-8da4-4285-b335-2a21f96bf14d\") "
Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.497912 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5657cd7-8da4-4285-b335-2a21f96bf14d-catalog-content\") pod \"b5657cd7-8da4-4285-b335-2a21f96bf14d\" (UID: \"b5657cd7-8da4-4285-b335-2a21f96bf14d\") "
Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.497942 4881 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5657cd7-8da4-4285-b335-2a21f96bf14d-utilities\") pod \"b5657cd7-8da4-4285-b335-2a21f96bf14d\" (UID: \"b5657cd7-8da4-4285-b335-2a21f96bf14d\") "
Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.501108 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5657cd7-8da4-4285-b335-2a21f96bf14d-utilities" (OuterVolumeSpecName: "utilities") pod "b5657cd7-8da4-4285-b335-2a21f96bf14d" (UID: "b5657cd7-8da4-4285-b335-2a21f96bf14d"). InnerVolumeSpecName "utilities".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.507349 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5657cd7-8da4-4285-b335-2a21f96bf14d-kube-api-access-mkzpk" (OuterVolumeSpecName: "kube-api-access-mkzpk") pod "b5657cd7-8da4-4285-b335-2a21f96bf14d" (UID: "b5657cd7-8da4-4285-b335-2a21f96bf14d"). InnerVolumeSpecName "kube-api-access-mkzpk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.550326 4881 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5657cd7-8da4-4285-b335-2a21f96bf14d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b5657cd7-8da4-4285-b335-2a21f96bf14d" (UID: "b5657cd7-8da4-4285-b335-2a21f96bf14d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.600993 4881 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkzpk\" (UniqueName: \"kubernetes.io/projected/b5657cd7-8da4-4285-b335-2a21f96bf14d-kube-api-access-mkzpk\") on node \"crc\" DevicePath \"\"" Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.601042 4881 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5657cd7-8da4-4285-b335-2a21f96bf14d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.601054 4881 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5657cd7-8da4-4285-b335-2a21f96bf14d-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.665529 4881 generic.go:334] "Generic (PLEG): container finished" podID="b5657cd7-8da4-4285-b335-2a21f96bf14d" containerID="fa8014e05154430b763802e74e0d06d8f1611d8b730ec2f4802dea2e00d040c6" exitCode=0 Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.665583 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wknqh" event={"ID":"b5657cd7-8da4-4285-b335-2a21f96bf14d","Type":"ContainerDied","Data":"fa8014e05154430b763802e74e0d06d8f1611d8b730ec2f4802dea2e00d040c6"} Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.665612 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wknqh" event={"ID":"b5657cd7-8da4-4285-b335-2a21f96bf14d","Type":"ContainerDied","Data":"ae3058d5e49ee87fa3b0201dfa3f26e8290a85059a53bca40d86db84b30b2fbc"} Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.665633 4881 scope.go:117] "RemoveContainer" containerID="fa8014e05154430b763802e74e0d06d8f1611d8b730ec2f4802dea2e00d040c6" Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.665800 4881 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wknqh" Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.753424 4881 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wknqh"] Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.764569 4881 scope.go:117] "RemoveContainer" containerID="897a9d01f0ab32be37170daea611b41b1f4dbddecbe414ba6e78a19efce96199" Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.800339 4881 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wknqh"] Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.858301 4881 scope.go:117] "RemoveContainer" containerID="98044355d8d9bf0f9abe43edaf8786f54e23de9aa48ef5eaa50795a1beecc817" Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.921294 4881 scope.go:117] "RemoveContainer" containerID="fa8014e05154430b763802e74e0d06d8f1611d8b730ec2f4802dea2e00d040c6" Dec 11 10:21:00 crc kubenswrapper[4881]: E1211 10:21:00.922015 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa8014e05154430b763802e74e0d06d8f1611d8b730ec2f4802dea2e00d040c6\": container with ID starting with fa8014e05154430b763802e74e0d06d8f1611d8b730ec2f4802dea2e00d040c6 not found: ID does not exist" containerID="fa8014e05154430b763802e74e0d06d8f1611d8b730ec2f4802dea2e00d040c6" Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.922066 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa8014e05154430b763802e74e0d06d8f1611d8b730ec2f4802dea2e00d040c6"} err="failed to get container status \"fa8014e05154430b763802e74e0d06d8f1611d8b730ec2f4802dea2e00d040c6\": rpc error: code = NotFound desc = could not find container \"fa8014e05154430b763802e74e0d06d8f1611d8b730ec2f4802dea2e00d040c6\": container with ID starting with fa8014e05154430b763802e74e0d06d8f1611d8b730ec2f4802dea2e00d040c6 not found: ID does not exist" Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.922095 4881 scope.go:117] "RemoveContainer" containerID="897a9d01f0ab32be37170daea611b41b1f4dbddecbe414ba6e78a19efce96199" Dec 11 10:21:00 crc kubenswrapper[4881]: E1211 10:21:00.922421 4881 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"897a9d01f0ab32be37170daea611b41b1f4dbddecbe414ba6e78a19efce96199\": container with ID starting with 897a9d01f0ab32be37170daea611b41b1f4dbddecbe414ba6e78a19efce96199 not found: ID does not exist" containerID="897a9d01f0ab32be37170daea611b41b1f4dbddecbe414ba6e78a19efce96199" Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.922446 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"897a9d01f0ab32be37170daea611b41b1f4dbddecbe414ba6e78a19efce96199"} err="failed to get container status \"897a9d01f0ab32be37170daea611b41b1f4dbddecbe414ba6e78a19efce96199\": rpc error: code = NotFound desc = could not find container \"897a9d01f0ab32be37170daea611b41b1f4dbddecbe414ba6e78a19efce96199\": container with ID starting with 897a9d01f0ab32be37170daea611b41b1f4dbddecbe414ba6e78a19efce96199 not found: ID does not exist" Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.922461 4881 scope.go:117] "RemoveContainer" containerID="98044355d8d9bf0f9abe43edaf8786f54e23de9aa48ef5eaa50795a1beecc817" Dec 11 10:21:00 crc kubenswrapper[4881]: E1211 10:21:00.922766 4881 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"98044355d8d9bf0f9abe43edaf8786f54e23de9aa48ef5eaa50795a1beecc817\": container with ID starting with 98044355d8d9bf0f9abe43edaf8786f54e23de9aa48ef5eaa50795a1beecc817 not found: ID does not exist" containerID="98044355d8d9bf0f9abe43edaf8786f54e23de9aa48ef5eaa50795a1beecc817" Dec 11 10:21:00 crc kubenswrapper[4881]: I1211 10:21:00.922805 4881 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98044355d8d9bf0f9abe43edaf8786f54e23de9aa48ef5eaa50795a1beecc817"} err="failed to get container status \"98044355d8d9bf0f9abe43edaf8786f54e23de9aa48ef5eaa50795a1beecc817\": rpc error: code = NotFound desc = could not find container \"98044355d8d9bf0f9abe43edaf8786f54e23de9aa48ef5eaa50795a1beecc817\": container with ID starting with 98044355d8d9bf0f9abe43edaf8786f54e23de9aa48ef5eaa50795a1beecc817 not found: ID does not exist" Dec 11 10:21:01 crc kubenswrapper[4881]: I1211 10:21:01.013533 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:21:01 crc kubenswrapper[4881]: E1211 10:21:01.013789 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:21:01 crc kubenswrapper[4881]: I1211 10:21:01.034059 4881 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5657cd7-8da4-4285-b335-2a21f96bf14d" path="/var/lib/kubelet/pods/b5657cd7-8da4-4285-b335-2a21f96bf14d/volumes" Dec 11 10:21:16 crc kubenswrapper[4881]: I1211 10:21:16.006276 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:21:16 crc kubenswrapper[4881]: E1211 10:21:16.007160 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:21:30 crc kubenswrapper[4881]: I1211 10:21:30.610039 4881 trace.go:236] Trace[239493863]: "Calculate volume metrics of ca-trust-extracted for pod openshift-image-registry/image-registry-66df7c8f76-67h8h" (11-Dec-2025 10:21:29.256) (total time: 1352ms): Dec 11 10:21:30 crc kubenswrapper[4881]: Trace[239493863]: [1.352577201s] [1.352577201s] END Dec 11 10:21:31 crc kubenswrapper[4881]: I1211 10:21:31.006036 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:21:31 crc kubenswrapper[4881]: E1211 10:21:31.006725 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" 
podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:21:44 crc kubenswrapper[4881]: I1211 10:21:44.005200 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:21:44 crc kubenswrapper[4881]: E1211 10:21:44.006102 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:21:56 crc kubenswrapper[4881]: I1211 10:21:56.005468 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:21:56 crc kubenswrapper[4881]: E1211 10:21:56.006236 4881 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z9nnh_openshift-machine-config-operator(56d69133-af36-4cbd-af7d-3a58cc4dd8ca)\"" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" podUID="56d69133-af36-4cbd-af7d-3a58cc4dd8ca" Dec 11 10:22:08 crc kubenswrapper[4881]: I1211 10:22:08.006443 4881 scope.go:117] "RemoveContainer" containerID="d128e0dff02b3825dd79b2d490e4268b7eaa7e6a607bbc27d6da7c90b6fbf705" Dec 11 10:22:08 crc kubenswrapper[4881]: I1211 10:22:08.448184 4881 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z9nnh" event={"ID":"56d69133-af36-4cbd-af7d-3a58cc4dd8ca","Type":"ContainerStarted","Data":"ba93fb8353b0253d1db3cfdd8f4a1970b0e81332b8ea0cf5f9c438ef5fe0311c"} var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515116515544024454 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015116515545017372 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015116476354016521 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015116476354015471 5ustar corecore